
The CIFAR-10 dataset (Canadian Institute For Advanced Research) is a collection of images that are commonly used to train machine learning and computer vision algorithms. It is one of the most widely used datasets for machine learning research. The CIFAR-10 dataset contains 60,000 32x32 color images in 10 different classes. The 10 different classes represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks. There are 6,000 images of each class.
import datetime
import time
import numpy as np
import pandas as pd
from packaging import version
from collections import Counter
import cv2
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error as MSE
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization, Dropout, Flatten, Input, Dense
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing import image
from tensorflow.keras.utils import to_categorical
%matplotlib inline
np.set_printoptions(precision=3, suppress=True)
print("This notebook requires TensorFlow 2.0 or above")
print("TensorFlow version: ", tf.__version__)
assert version.parse(tf.__version__).release[0] >= 2
This notebook requires TensorFlow 2.0 or above
TensorFlow version:  2.6.0
print("Keras version: ", keras.__version__)
Keras version: 2.6.0
from google.colab import drive
drive.mount('/content/gdrive')
Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount("/content/gdrive", force_remount=True).
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class.
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
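As a quick sanity check (a minimal sketch using the Counter imported above), we can confirm the per-class balance of the loaded splits:

# y_train and y_test have shape (N, 1), so flatten before counting
print(Counter(y_train.flatten()))  # expect 5,000 images per class
print(Counter(y_test.flatten()))   # expect 1,000 images per class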
The labels are an array of integers, ranging from 0 to 9. These correspond to the class of object the image represents:
| Label | Class |
|---|---|
| 0 | airplane |
| 1 | automobile |
| 2 | bird |
| 3 | cat |
| 4 | deer |
| 5 | dog |
| 6 | frog |
| 7 | horse |
| 8 | ship |
| 9 | truck |
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
from tabulate import tabulate
label_cifar = pd.DataFrame([[0,1,2,3,4,5,6,7,8,9],class_names]).T
label_cifar.columns = ["Label", "Classes"]
label_cifar = label_cifar.set_index(["Label"])
print(tabulate(label_cifar, headers='keys', tablefmt='fancy_grid'))
╒═════════╤════════════╕
│   Label │ Classes    │
╞═════════╪════════════╡
│       0 │ airplane   │
├─────────┼────────────┤
│       1 │ automobile │
├─────────┼────────────┤
│       2 │ bird       │
├─────────┼────────────┤
│       3 │ cat        │
├─────────┼────────────┤
│       4 │ deer       │
├─────────┼────────────┤
│       5 │ dog        │
├─────────┼────────────┤
│       6 │ frog       │
├─────────┼────────────┤
│       7 │ horse      │
├─────────┼────────────┤
│       8 │ ship       │
├─────────┼────────────┤
│       9 │ truck      │
╘═════════╧════════════╛
x_train_split, x_valid_split, y_train_split, y_valid_split = train_test_split(
    x_train,
    y_train,
    test_size=.1,
    random_state=42,
    shuffle=True)
print("Training", x_train_split.shape, "Validation", x_valid_split.shape, "Test", x_test.shape)
Training (45000, 32, 32, 3) Validation (5000, 32, 32, 3) Test (10000, 32, 32, 3)
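Because train_test_split shuffles at random, the 5,000-image validation set is only approximately class-balanced. A hedged alternative (not what was run above; variable names are illustrative) is to pass stratify so each class contributes exactly 500 validation images:

# Stratified variant: exactly 10% of each class ends up in validation
x_tr, x_va, y_tr, y_va = train_test_split(
    x_train, y_train, test_size=.1, random_state=42, stratify=y_train.flatten())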
cifar_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
print('Example training images and their labels: ' + str([x[0] for x in y_train[5:10]]))
print('Corresponding classes for the labels: ' + str([cifar_classes[x[0]] for x in y_train[5:10]]))
f, axarr = plt.subplots(1, 5)
f.set_size_inches(16, 6)
for i in range(5):
    img = x_train[i + 5]
    axarr[i].imshow(img)
plt.show()
Example training images and their labels: [1, 2, 7, 8, 3]
Corresponding classes for the labels: ['automobile', 'bird', 'horse', 'ship', 'cat']
The images are 32x32x3 NumPy arrays, with pixel values ranging from 0 to 255.
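As a minimal check before scaling (added here for illustration), the raw arrays are unsigned 8-bit integers:

print(x_train_split.dtype, x_train_split.min(), x_train_split.max())  # uint8 0 255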
# Scale pixel values to the [0, 1] range
x_train_norm = x_train_split / 255
x_valid_norm = x_valid_split / 255
x_test_norm = x_test / 255

def compile_train_model(model, x_train, y_train, x_valid, y_valid,
                        epochs=200, optimizer='RMSprop', batch_size=512):
    # Compile with integer-label cross-entropy (labels are not one-hot encoded)
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    # Train with early stopping and checkpointing, both monitoring validation accuracy
    start_time = time.time()
    history = model.fit(
        x_train,
        y_train,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_valid, y_valid),
        callbacks=[
            EarlyStopping(monitor='val_accuracy', patience=10),
            ModelCheckpoint('/content/gdrive/My Drive/Colab Notebooks/models/model_{val_accuracy:.4f}.h5',
                            save_best_only=True,
                            save_weights_only=False,
                            monitor='val_accuracy')])
    end_time = time.time()
    seconds_elapsed = round(end_time - start_time)
    print(f'Finished model training in {seconds_elapsed}s')
    print('Model performance with training set')
    model.evaluate(x_train, y_train)
    print('Evaluating model performance with validation set')
    model.evaluate(x_valid, y_valid)
    return history, model
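Since the ModelCheckpoint callback saves a new .h5 file whenever val_accuracy improves, the best checkpoint can be reloaded after training. A sketch, assuming the directory and filename template used in the callback above:

import glob
# The filename embeds val_accuracy (fixed 0.xxxx format), so the
# lexicographically largest file is the best checkpoint
checkpoints = sorted(glob.glob('/content/gdrive/My Drive/Colab Notebooks/models/model_*.h5'))
best_model = keras.models.load_model(checkpoints[-1])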
def plot_history(history):
    losses = history.history['loss']
    accs = history.history['accuracy']
    val_losses = history.history['val_loss']
    val_accs = history.history['val_accuracy']
    epochs = len(losses)
    plt.figure(figsize=(16, 4))
    for i, metrics in enumerate(zip([losses, accs], [val_losses, val_accs], ['Loss', 'Accuracy'])):
        plt.subplot(1, 2, i + 1)
        plt.plot(range(epochs), metrics[0], label='Training {}'.format(metrics[2]))
        plt.plot(range(epochs), metrics[1], label='Validation {}'.format(metrics[2]))
        plt.legend()
    plt.show()
def print_validation_report(y_test, predictions):
    print("Classification Report")
    print(classification_report(y_test, predictions))
    print('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
    # Note: labels are nominal classes, so RMSE here only measures label-index distance
    print('Root Mean Square Error: {}'.format(np.sqrt(MSE(y_test, predictions))))
def plot_confusion_matrix(y_true, y_pred):
    mtx = confusion_matrix(y_true, y_pred)
    fig, ax = plt.subplots(figsize=(8, 8))
    sns.heatmap(mtx, annot=True, fmt='d', linewidths=.75, cbar=False, ax=ax,
                cmap='Blues', linecolor='white')
    plt.ylabel('true label')
    plt.xlabel('predicted label')

def plot_confusion_matrix_labeled(y_true, y_pred):
    mtx = confusion_matrix(y_true, y_pred)
    # Label the axes with the class names
    classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
    temp_df = pd.DataFrame(data=mtx, columns=classes)
    temp_df.index = classes
    fig, ax = plt.subplots(figsize=(8, 8))
    sns.heatmap(temp_df, annot=True, fmt='d', linewidths=.75, cbar=False, ax=ax,
                cmap='Blues', linecolor='white')
    plt.ylabel('true label')
    plt.xlabel('predicted label')
We use the Sequential class from Keras to build the model. The first nine layers (three Conv2D/MaxPool2D/Dropout blocks) handle feature learning; the remaining layers (Flatten, a regularized Dense layer with BatchNormalization and Dropout, and a softmax Dense output) handle classification.
model = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:]),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=10, activation=tf.nn.softmax)
])
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 30, 30, 128)       3584
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 15, 15, 128)       0
_________________________________________________________________
dropout (Dropout)            (None, 15, 15, 128)       0
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 13, 13, 256)       295168
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 6, 6, 256)         0
_________________________________________________________________
dropout_1 (Dropout)          (None, 6, 6, 256)         0
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 4, 4, 512)         1180160
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 2, 2, 512)         0
_________________________________________________________________
dropout_2 (Dropout)          (None, 2, 2, 512)         0
_________________________________________________________________
flatten (Flatten)            (None, 2048)              0
_________________________________________________________________
dense (Dense)                (None, 384)               786816
_________________________________________________________________
batch_normalization (BatchNo (None, 384)               1536
_________________________________________________________________
dropout_3 (Dropout)          (None, 384)               0
_________________________________________________________________
dense_1 (Dense)              (None, 10)                3850
=================================================================
Total params: 2,271,114
Trainable params: 2,270,346
Non-trainable params: 768
_________________________________________________________________
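The parameter counts in the summary can be verified by hand. For the first Conv2D layer, each of the 128 filters spans a 3x3 window over the 3 input channels, plus one bias per filter:

# (kernel height * kernel width * input channels + 1 bias) * number of filters
print((3 * 3 * 3 + 1) * 128)  # 3584, matching conv2d in the summary above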
# Compile and train
history, model = compile_train_model(model,
                                     x_train_norm, y_train_split,
                                     x_valid_norm, y_valid_split)
Epoch 1/200
88/88 [==============================] - 22s 52ms/step - loss: 2.1192 - accuracy: 0.3873 - val_loss: 2.3822 - val_accuracy: 0.1258
Epoch 2/200
88/88 [==============================] - 3s 38ms/step - loss: 1.4236 - accuracy: 0.5537 - val_loss: 2.1387 - val_accuracy: 0.3564
...
Epoch 65/200
88/88 [==============================] - 3s 38ms/step - loss: 0.2620 - accuracy: 0.9411 - val_loss: 0.7459 - val_accuracy: 0.8182
Finished model training in 241s
Model performance with training set
1407/1407 [==============================] - 5s 3ms/step - loss: 0.1085 - accuracy: 0.9983
Evaluating model performance with validation set
157/157 [==============================] - 1s 3ms/step - loss: 0.7459 - accuracy: 0.8182
# Plot the training metrics
plot_history(history)
We evaluate the trained model on the held-out test set using sklearn.metrics, then visualize the confusion matrix to see where the model goes wrong.
pred0 = model.predict(x_test_norm)
pred0 = np.argmax(pred0, axis=1)
print_validation_report(y_test, pred0)
Classification Report
              precision    recall  f1-score   support

           0       0.80      0.84      0.82      1000
           1       0.93      0.90      0.92      1000
           2       0.72      0.74      0.73      1000
           3       0.58      0.73      0.65      1000
           4       0.82      0.78      0.80      1000
           5       0.82      0.63      0.71      1000
           6       0.82      0.88      0.85      1000
           7       0.91      0.79      0.85      1000
           8       0.87      0.90      0.89      1000
           9       0.90      0.87      0.88      1000

    accuracy                           0.81     10000
   macro avg       0.82      0.81      0.81     10000
weighted avg       0.82      0.81      0.81     10000
Accuracy Score: 0.8073
Root Mean Square Error: 1.7619875141441836
plot_confusion_matrix_labeled(y_test, pred0)
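The diagonal of the confusion matrix also yields per-class accuracy directly; a small sketch (variable names are illustrative):

mtx = confusion_matrix(y_test, pred0)
per_class_accuracy = mtx.diagonal() / mtx.sum(axis=1)  # correct / total per true class
for name, acc in zip(class_names, per_class_accuracy):
    print(f'{name:12s} {acc:.3f}')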
# Extract the outputs of all layers
layer_outputs = [layer.output for layer in model.layers]
# Build a model that returns these outputs, given the model input
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
# Activations for the first 5000 validation images
activations = activation_model.predict(x_valid_norm[:5000])
# activations[-3] is the BatchNormalization output of the 384-unit Dense block;
# activations[-1] is the softmax output layer
dense_layer_activations = activations[-3]
output_layer_activations = activations[-1]
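Because activations is indexed by layer position, it is worth confirming which entries were grabbed; this quick check (added for illustration) lists each layer alongside its activation shape, showing that index -3 is the BatchNormalization output of the 384-unit block and -1 is the softmax output:

for i, (layer, act) in enumerate(zip(model.layers, activations)):
    print(i, layer.name, act.shape)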
We reduce the 384-dimensional activations to two dimensions with t-SNE (see https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html).
# Reduce the dimension using t-SNE to visualize in a scatterplot
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_results = tsne.fit_transform(dense_layer_activations)
# Min-max scale the embedding to [0, 1] (one global min/max over both axes)
tsne_results = (tsne_results - tsne_results.min()) / (tsne_results.max() - tsne_results.min())
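The scaling above uses one global minimum and maximum across both embedding axes. The MinMaxScaler imported earlier would instead scale each axis to [0, 1] independently, which only changes the plot's aspect ratio (shown as an alternative, not what was run):

tsne_scaled_per_axis = MinMaxScaler().fit_transform(tsne_results)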
[t-SNE] Computing 121 nearest neighbors...
[t-SNE] Indexed 5000 samples in 0.179s...
[t-SNE] Computed neighbors for 5000 samples in 15.770s...
[t-SNE] Computed conditional probabilities for sample 1000 / 5000
[t-SNE] Computed conditional probabilities for sample 2000 / 5000
[t-SNE] Computed conditional probabilities for sample 3000 / 5000
[t-SNE] Computed conditional probabilities for sample 4000 / 5000
[t-SNE] Computed conditional probabilities for sample 5000 / 5000
[t-SNE] Mean sigma: 1.086854
[t-SNE] KL divergence after 250 iterations with early exaggeration: 78.487518
[t-SNE] KL divergence after 300 iterations: 2.199147
cmap = plt.cm.tab10
plt.figure(figsize=(16, 10))
scatter = plt.scatter(tsne_results[:, 0], tsne_results[:, 1],
                      c=y_valid_split[:5000].flatten(), s=10, cmap=cmap)
plt.legend(handles=scatter.legend_elements()[0], labels=class_names)
image_positions = np.array([[1., 1.]])
for index, position in enumerate(tsne_results):
    dist = np.sum((position - image_positions) ** 2, axis=1)
    if np.min(dist) > 0.02:  # only annotate points far enough from other images
        image_positions = np.r_[image_positions, [position]]
        # Use the matching validation image (the activations came from x_valid_norm,
        # so indexing x_train here would pair points with the wrong pictures)
        imagebox = mpl.offsetbox.AnnotationBbox(
            mpl.offsetbox.OffsetImage(x_valid_split[index], cmap="binary"),
            position, bboxprops={"lw": 1})
        plt.gca().add_artist(imagebox)
plt.axis("off")
plt.show()
32 * 32 * 3
3072
# Flatten the normalized training, validation, and test data for the MLP models
print(x_train_split.shape, x_valid_split.shape, x_test.shape)
(45000, 32, 32, 3) (5000, 32, 32, 3) (10000, 32, 32, 3)
x_train_flat = np.reshape(x_train_norm, (45000, 3072))
x_valid_flat = np.reshape(x_valid_norm, (5000, 3072))
x_test_flat = np.reshape(x_test_norm, (10000, 3072))
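Hard-coding the sample counts works here, but letting NumPy infer the row count is more robust if the split sizes ever change; an equivalent alternative:

x_train_flat = x_train_norm.reshape(len(x_train_norm), -1)  # (45000, 3072)
x_valid_flat = x_valid_norm.reshape(len(x_valid_norm), -1)  # (5000, 3072)
x_test_flat = x_test_norm.reshape(len(x_test_norm), -1)     # (10000, 3072)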
model_1 = Sequential([
    Dense(input_shape=[3072], units=512, activation=tf.nn.relu),
    Dense(units=512, activation=tf.nn.relu),
    Dense(name="output_layer", units=10, activation=tf.nn.softmax)
])
model_1.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_2 (Dense)              (None, 512)               1573376
_________________________________________________________________
dense_3 (Dense)              (None, 512)               262656
_________________________________________________________________
output_layer (Dense)         (None, 10)                5130
=================================================================
Total params: 1,841,162
Trainable params: 1,841,162
Non-trainable params: 0
_________________________________________________________________
history_1, model_1 = compile_train_model(model_1,
                                         x_train_flat, y_train_split,
                                         x_valid_flat, y_valid_split)
Epoch 1/200
88/88 [==============================] - 1s 8ms/step - loss: 2.8006 - accuracy: 0.1911 - val_loss: 2.1183 - val_accuracy: 0.2172
...
Epoch 51/200
88/88 [==============================] - 0s 6ms/step - loss: 0.8915 - accuracy: 0.6813 - val_loss: 1.6658 - val_accuracy: 0.4846
Finished model training in 28s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 0.8563 - accuracy: 0.6912
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 1.6658 - accuracy: 0.4846
plot_history(history_1)
pred1 = model_1.predict(x_test_flat)
pred1 = np.argmax(pred1, axis=1)
print_validation_report(y_test, pred1)
Classification Report
              precision    recall  f1-score   support

           0       0.64      0.38      0.48      1000
           1       0.61      0.63      0.62      1000
           2       0.36      0.43      0.39      1000
           3       0.33      0.41      0.37      1000
           4       0.41      0.50      0.45      1000
           5       0.45      0.24      0.32      1000
           6       0.48      0.64      0.55      1000
           7       0.76      0.38      0.50      1000
           8       0.55      0.74      0.63      1000
           9       0.55      0.55      0.55      1000

    accuracy                           0.49     10000
   macro avg       0.51      0.49      0.49     10000
weighted avg       0.51      0.49      0.49     10000
Accuracy Score: 0.4913
Root Mean Square Error: 3.046686724952206
plot_confusion_matrix_labeled(y_test, pred1)
model_2 = Sequential([
    Dense(input_shape=[3072], units=1024, activation=tf.nn.relu),
    Dense(units=512, activation=tf.nn.relu),
    Dense(units=256, activation=tf.nn.relu),
    Dense(name="output_layer", units=10, activation=tf.nn.softmax)
])
model_2.summary()
Model: "sequential_9"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
dense_35 (Dense)             (None, 1024)              3146752
_________________________________________________________________
dense_36 (Dense)             (None, 512)               524800
_________________________________________________________________
dense_37 (Dense)             (None, 256)               131328
_________________________________________________________________
output_layer (Dense)         (None, 10)                2570
=================================================================
Total params: 3,805,450
Trainable params: 3,805,450
Non-trainable params: 0
_________________________________________________________________
history_2, model_2 = compile_train_model(model_2,
                                         x_train_flat, y_train_split,
                                         x_valid_flat, y_valid_split)
Epoch 1/200
88/88 [==============================] - 1s 9ms/step - loss: 2.9055 - accuracy: 0.1642 - val_loss: 2.1172 - val_accuracy: 0.2266
...
Epoch 46/200
88/88 [==============================] - 1s 7ms/step - loss: 0.6811 - accuracy: 0.7551 - val_loss: 1.8934 - val_accuracy: 0.4976
Finished model training in 32s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 0.5534 - accuracy: 0.8061
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 1.8934 - accuracy: 0.4976
plot_history(history_2)
pred2 = model_2.predict(x_test_flat)
pred2 = np.argmax(pred2, axis=1)
print_validation_report(y_test, pred2)
Classification Report
              precision    recall  f1-score   support

           0       0.57      0.56      0.57      1000
           1       0.67      0.56      0.61      1000
           2       0.40      0.44      0.42      1000
           3       0.37      0.29      0.33      1000
           4       0.42      0.34      0.38      1000
           5       0.42      0.40      0.41      1000
           6       0.46      0.66      0.55      1000
           7       0.53      0.59      0.56      1000
           8       0.70      0.62      0.66      1000
           9       0.53      0.61      0.57      1000

    accuracy                           0.51     10000
   macro avg       0.51      0.51      0.50     10000
weighted avg       0.51      0.51      0.50     10000
Accuracy Score: 0.5065
Root Mean Square Error: 3.0167697956589263
plot_confusion_matrix_labeled(y_test, pred2)
model_3 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:]),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
model_3.summary()
Model: "sequential_12"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d_6 (Conv2D)            (None, 30, 30, 64)        1792
_________________________________________________________________
max_pooling2d_6 (MaxPooling2 (None, 15, 15, 64)        0
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 13, 13, 128)       73856
_________________________________________________________________
max_pooling2d_7 (MaxPooling2 (None, 6, 6, 128)         0
_________________________________________________________________
flatten_4 (Flatten)          (None, 4608)              0
_________________________________________________________________
dense_42 (Dense)             (None, 384)               1769856
_________________________________________________________________
dense_43 (Dense)             (None, 10)                3850
=================================================================
Total params: 1,849,354
Trainable params: 1,849,354
Non-trainable params: 0
_________________________________________________________________
history_3, model_3 = compile_train_model(model_3,
                                         x_train_norm, y_train_split,
                                         x_valid_norm, y_valid_split)
Epoch 1/200
88/88 [==============================] - 2s 16ms/step - loss: 1.9167 - accuracy: 0.3217 - val_loss: 1.9036 - val_accuracy: 0.3328
...
Epoch 45/200
88/88 [==============================] - 1s 14ms/step - loss: 0.0326 - accuracy: 0.9936 - val_loss: 1.8764 - val_accuracy: 0.7290
Finished model training in 59s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 8.6926e-04 - accuracy: 1.0000
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 1.8764 - accuracy: 0.7290
plot_history(history_3)
pred3 = model_3.predict(x_test_norm)
pred3 = np.argmax(pred3, axis=1)
print_validation_report(y_test, pred3)
Classification Report
              precision    recall  f1-score   support

           0       0.75      0.78      0.77      1000
           1       0.82      0.83      0.82      1000
           2       0.65      0.63      0.64      1000
           3       0.54      0.53      0.54      1000
           4       0.69      0.69      0.69      1000
           5       0.65      0.61      0.63      1000
           6       0.75      0.82      0.78      1000
           7       0.78      0.76      0.77      1000
           8       0.81      0.81      0.81      1000
           9       0.79      0.79      0.79      1000

    accuracy                           0.73     10000
   macro avg       0.72      0.73      0.72     10000
weighted avg       0.72      0.73      0.72     10000
Accuracy Score: 0.7253
Root Mean Square Error: 2.187715703650728
plot_confusion_matrix_labeled(y_test, pred3)
model_4 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:]),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
history_4, model_4 = compile_train_model(model_4,
                                         x_train_norm, y_train_split,
                                         x_valid_norm, y_valid_split)
Epoch 1/200
88/88 [==============================] - 2s 20ms/step - loss: 2.0764 - accuracy: 0.2471 - val_loss: 1.7600 - val_accuracy: 0.3810
...
Epoch 43/200
88/88 [==============================] - 1s 16ms/step - loss: 0.0852 - accuracy: 0.9805 - val_loss: 1.7426 - val_accuracy: 0.7312
Finished model training in 65s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 0.0043 - accuracy: 0.9994
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 1.7426 - accuracy: 0.7312
plot_history(history_4)
pred4 = model_4.predict(x_test_norm)
pred4 = np.argmax(pred4, axis=1)
print_validation_report(y_test, pred4)
Classification Report
precision recall f1-score support
0 0.77 0.76 0.76 1000
1 0.84 0.84 0.84 1000
2 0.61 0.67 0.64 1000
3 0.56 0.59 0.57 1000
4 0.69 0.65 0.67 1000
5 0.66 0.58 0.62 1000
6 0.76 0.81 0.79 1000
7 0.79 0.78 0.78 1000
8 0.83 0.84 0.84 1000
9 0.80 0.80 0.80 1000
accuracy 0.73 10000
macro avg 0.73 0.73 0.73 10000
weighted avg 0.73 0.73 0.73 10000
Accuracy Score: 0.7314
Root Mean Square Error: 2.117829077144801
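The report above comes from the notebook's print_validation_report helper. Given the sklearn imports at the top of the notebook, it plausibly looks like the sketch below; note the RMSE is computed over raw integer class labels, so it depends on the arbitrary ordering of the classes and is best read as a rough secondary number.
# Hypothetical sketch of the validation-report helper; the real one may differ.
def print_validation_report_sketch(y_true, y_pred):
    y_true = np.ravel(y_true)  # CIFAR-10 labels arrive with shape (n, 1)
    print("Classification Report")
    print(classification_report(y_true, y_pred))
    print("Accuracy Score:", accuracy_score(y_true, y_pred))
    # RMSE over integer class labels, matching the number printed above
    print("Root Mean Square Error:", np.sqrt(MSE(y_true, y_pred)))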
plot_confusion_matrix_labeled(y_test, pred4)
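plot_confusion_matrix_labeled presumably pairs sklearn's confusion_matrix with a seaborn heatmap and the class_names list defined earlier; a sketch under those assumptions:
# Hypothetical sketch of the labeled confusion-matrix plot.
def plot_confusion_matrix_labeled_sketch(y_true, y_pred):
    cm = confusion_matrix(np.ravel(y_true), y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d',
                xticklabels=class_names, yticklabels=class_names)
    plt.xlabel('predicted label')
    plt.ylabel('true label')
    plt.show()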
Experiment 5
model_5 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:]),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(1, 1), strides=(1, 1), activation=tf.nn.relu),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
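Note the fourth convolution in model_5 uses a 1x1 kernel. By this depth the 'valid'-padded 3x3 convolutions have shrunk the feature map to 2x2, too small for another 3x3 kernel, so the 1x1 layer only mixes the 256 channels pointwise, contributing 1*1*256*512 + 512 = 131,584 parameters (visible in the summary below). A quick standalone check of that count:
# Parameter counts of a 1x1 vs. a 3x3 convolution over 256 input channels
pointwise = Conv2D(512, (1, 1))
pointwise.build((None, 2, 2, 256))
spatial = Conv2D(512, (3, 3), padding='same')
spatial.build((None, 2, 2, 256))
print(pointwise.count_params())  # 1*1*256*512 + 512 = 131584
print(spatial.count_params())    # 3*3*256*512 + 512 = 1180160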
model_5.summary()
Model: "sequential_27" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_89 (Conv2D) (None, 30, 30, 64) 1792 _________________________________________________________________ max_pooling2d_89 (MaxPooling (None, 15, 15, 64) 0 _________________________________________________________________ conv2d_90 (Conv2D) (None, 13, 13, 128) 73856 _________________________________________________________________ max_pooling2d_90 (MaxPooling (None, 6, 6, 128) 0 _________________________________________________________________ conv2d_91 (Conv2D) (None, 4, 4, 256) 295168 _________________________________________________________________ max_pooling2d_91 (MaxPooling (None, 2, 2, 256) 0 _________________________________________________________________ conv2d_92 (Conv2D) (None, 2, 2, 512) 131584 _________________________________________________________________ max_pooling2d_92 (MaxPooling (None, 1, 1, 512) 0 _________________________________________________________________ flatten_21 (Flatten) (None, 512) 0 _________________________________________________________________ dense_42 (Dense) (None, 384) 196992 _________________________________________________________________ dense_43 (Dense) (None, 10) 3850 ================================================================= Total params: 703,242 Trainable params: 703,242 Non-trainable params: 0 _________________________________________________________________
history_5, model_5 = compile_train_model(model_5,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 54 of 200 epochs (97s). Epoch 1: loss 2.1980, accuracy 0.1768, val_loss 1.9763, val_accuracy 0.2692. Validation accuracy peaked at 0.7594 in epoch 44. Epoch 54: loss 0.1590, accuracy 0.9594, val_loss 1.5335, val_accuracy 0.7528.
Model performance with training set: loss 0.0063, accuracy 0.9992
Evaluating model performance with validation set: loss 1.5335, accuracy 0.7528
plot_history(history_5)
pred5 = model_5.predict(x_test_norm)
pred5 = np.argmax(pred5, axis=1)
print_validation_report(y_test, pred5)
Classification Report
precision recall f1-score support
0 0.76 0.80 0.78 1000
1 0.88 0.84 0.86 1000
2 0.69 0.66 0.67 1000
3 0.60 0.58 0.59 1000
4 0.71 0.71 0.71 1000
5 0.66 0.67 0.66 1000
6 0.76 0.81 0.78 1000
7 0.79 0.76 0.77 1000
8 0.85 0.83 0.84 1000
9 0.80 0.82 0.81 1000
accuracy 0.75 10000
macro avg 0.75 0.75 0.75 10000
weighted avg 0.75 0.75 0.75 10000
Accuracy Score: 0.7482
Root Mean Square Error: 2.0625227271475093
plot_confusion_matrix_labeled(y_test, pred5)
Experiment 6
model_6 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
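The change from Experiment 5 is padding='same' on every convolution, so the 3x3 kernels no longer shave two pixels off each side and only the pooling layers downsample (see the summary below). A 'valid' 3x3/stride-1 convolution maps an n x n input to (n-2) x (n-2); 'same' keeps it n x n. A small sketch of the per-block spatial sizes:
# Spatial side length after each conv+pool block on a 32x32 input
def block_sizes(n, blocks, padding):
    sizes = []
    for _ in range(blocks):
        if padding == 'valid':
            n -= 2  # 3x3 conv, stride 1, no padding
        n //= 2     # 2x2 max pooling, stride 2
        sizes.append(n)
    return sizes

print(block_sizes(32, 4, 'same'))   # [16, 8, 4, 2]
print(block_sizes(32, 3, 'valid'))  # [15, 6, 2] -- no room for a 4th 3x3 'valid' conv,
                                    # which is why model_5 fell back to a 1x1 kernel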
model_6.summary()
Model: "sequential_37" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_131 (Conv2D) (None, 32, 32, 64) 1792 _________________________________________________________________ max_pooling2d_131 (MaxPoolin (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_132 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_132 (MaxPoolin (None, 8, 8, 128) 0 _________________________________________________________________ conv2d_133 (Conv2D) (None, 8, 8, 256) 295168 _________________________________________________________________ max_pooling2d_133 (MaxPoolin (None, 4, 4, 256) 0 _________________________________________________________________ conv2d_134 (Conv2D) (None, 4, 4, 512) 1180160 _________________________________________________________________ max_pooling2d_134 (MaxPoolin (None, 2, 2, 512) 0 _________________________________________________________________ flatten_31 (Flatten) (None, 2048) 0 _________________________________________________________________ dense_62 (Dense) (None, 384) 786816 _________________________________________________________________ dense_63 (Dense) (None, 10) 3850 ================================================================= Total params: 2,341,642 Trainable params: 2,341,642 Non-trainable params: 0 _________________________________________________________________
history_6, model_6 = compile_train_model(model_6,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 32 of 200 epochs (76s). Epoch 1: loss 2.2533, accuracy 0.2066, val_loss 2.0334, val_accuracy 0.2976. Validation accuracy peaked at 0.7626 in epoch 22. Epoch 32: loss 0.0562, accuracy 0.9840, val_loss 1.7036, val_accuracy 0.7234.
Model performance with training set: loss 0.0721, accuracy 0.9764
Evaluating model performance with validation set: loss 1.7036, accuracy 0.7234
plot_history(history_6)
pred6 = model_6.predict(x_test_norm)
pred6 = np.argmax(pred6, axis=1)
print_validation_report(y_test, pred6)
Classification Report
precision recall f1-score support
0 0.76 0.78 0.77 1000
1 0.76 0.91 0.83 1000
2 0.69 0.59 0.64 1000
3 0.64 0.41 0.50 1000
4 0.67 0.68 0.68 1000
5 0.61 0.68 0.64 1000
6 0.67 0.86 0.75 1000
7 0.75 0.81 0.78 1000
8 0.91 0.75 0.82 1000
9 0.79 0.78 0.79 1000
accuracy 0.72 10000
macro avg 0.73 0.72 0.72 10000
weighted avg 0.73 0.72 0.72 10000
Accuracy Score: 0.7245
Root Mean Square Error: 2.1913694348511847
plot_confusion_matrix_labeled(y_test, pred6)
Experiment 7
model_7 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
model_7.summary()
Model: "sequential_34" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_118 (Conv2D) (None, 32, 32, 64) 1792 _________________________________________________________________ max_pooling2d_118 (MaxPoolin (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_119 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_119 (MaxPoolin (None, 8, 8, 128) 0 _________________________________________________________________ conv2d_120 (Conv2D) (None, 8, 8, 256) 295168 _________________________________________________________________ max_pooling2d_120 (MaxPoolin (None, 4, 4, 256) 0 _________________________________________________________________ conv2d_121 (Conv2D) (None, 4, 4, 512) 1180160 _________________________________________________________________ max_pooling2d_121 (MaxPoolin (None, 2, 2, 512) 0 _________________________________________________________________ conv2d_122 (Conv2D) (None, 2, 2, 1024) 4719616 _________________________________________________________________ max_pooling2d_122 (MaxPoolin (None, 1, 1, 1024) 0 _________________________________________________________________ flatten_28 (Flatten) (None, 1024) 0 _________________________________________________________________ dense_56 (Dense) (None, 384) 393600 _________________________________________________________________ dense_57 (Dense) (None, 10) 3850 ================================================================= Total params: 6,668,042 Trainable params: 6,668,042 Non-trainable params: 0 _________________________________________________________________
history_7, model_7 = compile_train_model(model_7,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 37 of 200 epochs (139s). Epoch 1: loss 2.3466, accuracy 0.1655, val_loss 2.0958, val_accuracy 0.2086. Validation accuracy peaked at 0.7628 in epoch 27. Epoch 37: loss 0.0576, accuracy 0.9839, val_loss 1.8877, val_accuracy 0.7470.
Model performance with training set: loss 0.0157, accuracy 0.9950
Evaluating model performance with validation set: loss 1.8877, accuracy 0.7470
plot_history(history_7)
pred7 = model_7.predict(x_test_norm)
pred7 = np.argmax(pred7, axis=1)
print_validation_report(y_test, pred7)
Classification Report
precision recall f1-score support
0 0.73 0.83 0.77 1000
1 0.92 0.82 0.86 1000
2 0.67 0.66 0.67 1000
3 0.56 0.56 0.56 1000
4 0.75 0.67 0.71 1000
5 0.65 0.63 0.64 1000
6 0.73 0.83 0.78 1000
7 0.78 0.81 0.80 1000
8 0.89 0.79 0.84 1000
9 0.80 0.85 0.82 1000
accuracy 0.74 10000
macro avg 0.75 0.74 0.74 10000
weighted avg 0.75 0.74 0.74 10000
Accuracy Score: 0.7445
Root Mean Square Error: 2.1114923632350653
plot_confusion_matrix_labeled(y_test, pred7)
Experiment 8
model_8 = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=384, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
model_8.summary()
Model: "sequential_36" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_127 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_127 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ conv2d_128 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_128 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ conv2d_129 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_129 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ conv2d_130 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_130 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ flatten_30 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_60 (Dense) (None, 384) 1573248 _________________________________________________________________ dense_61 (Dense) (None, 10) 3850 ================================================================= Total params: 7,775,626 Trainable params: 7,775,626 Non-trainable params: 0 _________________________________________________________________
history_8, model_8 = compile_train_model(model_8,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 21 of 200 epochs (114s). Epoch 1: loss 2.6543, accuracy 0.1776, val_loss 1.9819, val_accuracy 0.2678. Validation accuracy peaked at 0.7604 (epochs 11 and 16). Epoch 21: loss 0.1105, accuracy 0.9724, val_loss 1.5005, val_accuracy 0.7546.
Model performance with training set: loss 0.0174, accuracy 0.9940
Evaluating model performance with validation set: loss 1.5005, accuracy 0.7546
plot_history(history_8)
pred8 = model_8.predict(x_test_norm)
pred8 = np.argmax(pred8, axis=1)
print_validation_report(y_test, pred8)
Classification Report
precision recall f1-score support
0 0.79 0.80 0.80 1000
1 0.94 0.74 0.83 1000
2 0.64 0.71 0.67 1000
3 0.60 0.55 0.57 1000
4 0.76 0.67 0.71 1000
5 0.58 0.72 0.64 1000
6 0.85 0.77 0.81 1000
7 0.81 0.79 0.80 1000
8 0.84 0.87 0.86 1000
9 0.76 0.87 0.81 1000
accuracy 0.75 10000
macro avg 0.76 0.75 0.75 10000
weighted avg 0.76 0.75 0.75 10000
Accuracy Score: 0.7487
Root Mean Square Error: 2.0961393083476105
plot_confusion_matrix_labeled(y_test, pred8)
Experiment 9
model_9 = Sequential([
    Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=256, activation=tf.nn.relu),
    Dense(units=512, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
model_9.summary()
Model: "sequential_38" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_135 (Conv2D) (None, 32, 32, 64) 1792 _________________________________________________________________ max_pooling2d_135 (MaxPoolin (None, 16, 16, 64) 0 _________________________________________________________________ conv2d_136 (Conv2D) (None, 16, 16, 128) 73856 _________________________________________________________________ max_pooling2d_136 (MaxPoolin (None, 8, 8, 128) 0 _________________________________________________________________ conv2d_137 (Conv2D) (None, 8, 8, 256) 295168 _________________________________________________________________ max_pooling2d_137 (MaxPoolin (None, 4, 4, 256) 0 _________________________________________________________________ conv2d_138 (Conv2D) (None, 4, 4, 512) 1180160 _________________________________________________________________ max_pooling2d_138 (MaxPoolin (None, 2, 2, 512) 0 _________________________________________________________________ flatten_32 (Flatten) (None, 2048) 0 _________________________________________________________________ dense_64 (Dense) (None, 256) 524544 _________________________________________________________________ dense_65 (Dense) (None, 512) 131584 _________________________________________________________________ dense_66 (Dense) (None, 10) 5130 ================================================================= Total params: 2,212,234 Trainable params: 2,212,234 Non-trainable params: 0 _________________________________________________________________
history_9, model_9 = compile_train_model(model_9,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 54 of 200 epochs (128s). Epoch 1: loss 2.2267, accuracy 0.1872, val_loss 1.9045, val_accuracy 0.3006. Validation accuracy peaked at 0.7602 in epoch 44. Epoch 54: loss 0.0318, accuracy 0.9917, val_loss 1.9522, val_accuracy 0.7506.
Model performance with training set: loss 0.0259, accuracy 0.9918
Evaluating model performance with validation set: loss 1.9522, accuracy 0.7506
plot_history(history_9)
pred9 = model_9.predict(x_test_norm)
pred9 = np.argmax(pred9, axis=1)
print_validation_report(y_test, pred9)
Classification Report
precision recall f1-score support
0 0.77 0.79 0.78 1000
1 0.82 0.88 0.85 1000
2 0.68 0.63 0.65 1000
3 0.51 0.59 0.55 1000
4 0.67 0.71 0.69 1000
5 0.67 0.60 0.63 1000
6 0.77 0.82 0.80 1000
7 0.87 0.70 0.78 1000
8 0.84 0.86 0.85 1000
9 0.82 0.81 0.81 1000
accuracy 0.74 10000
macro avg 0.74 0.74 0.74 10000
weighted avg 0.74 0.74 0.74 10000
Accuracy Score: 0.7379
Root Mean Square Error: 2.088971038573776
plot_confusion_matrix_labeled(y_test, pred9)
Experiment 10
model_10 = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Flatten(),
    Dense(units=256, activation=tf.nn.relu),
    Dense(units=512, activation=tf.nn.relu),
    Dense(units=10, activation=tf.nn.softmax)
])
model_10.summary()
Model: "sequential_39" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_139 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_139 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ conv2d_140 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_140 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ conv2d_141 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_141 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ conv2d_142 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_142 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ flatten_33 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_67 (Dense) (None, 256) 1048832 _________________________________________________________________ dense_68 (Dense) (None, 512) 131584 _________________________________________________________________ dense_69 (Dense) (None, 10) 5130 ================================================================= Total params: 7,384,074 Trainable params: 7,384,074 Non-trainable params: 0 _________________________________________________________________
history_10, model_10 = compile_train_model(model_10,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Training log (condensed): early stopping ended the run after 28 of 200 epochs (146s). Epoch 1: loss 2.7110, accuracy 0.1574, val_loss 2.0847, val_accuracy 0.2050. Validation accuracy peaked at 0.7592 in epoch 18. Epoch 28: loss 0.0665, accuracy 0.9819, val_loss 1.5594, val_accuracy 0.7452.
Model performance with training set: loss 0.0416, accuracy 0.9868
Evaluating model performance with validation set: loss 1.5594, accuracy 0.7452
plot_history(history_10)
pred10 = model_10.predict(x_test_norm)
pred10 = np.argmax(pred10, axis=1)
print_validation_report(y_test, pred10)
Classification Report
precision recall f1-score support
0 0.82 0.74 0.78 1000
1 0.92 0.78 0.84 1000
2 0.61 0.74 0.66 1000
3 0.52 0.61 0.56 1000
4 0.69 0.73 0.71 1000
5 0.77 0.50 0.61 1000
6 0.79 0.83 0.81 1000
7 0.85 0.75 0.80 1000
8 0.78 0.88 0.83 1000
9 0.77 0.87 0.82 1000
accuracy 0.74 10000
macro avg 0.75 0.74 0.74 10000
weighted avg 0.75 0.74 0.74 10000
Accuracy Score: 0.7419
Root Mean Square Error: 2.1354624791833734
plot_confusion_matrix_labeled(y_test, pred10)
Experiment 11
model_11 = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    BatchNormalization(),
    Flatten(),
    Dense(units=256, activation=tf.nn.relu),
    BatchNormalization(),
    Dense(units=512, activation=tf.nn.relu),
    BatchNormalization(),
    Dense(units=10, activation=tf.nn.softmax)
])
model_11.summary()
Model: "sequential_40" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_143 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_143 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_2 (Batch (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_144 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_144 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_3 (Batch (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_145 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_145 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_4 (Batch (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_146 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_146 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_5 (Batch (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_34 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_70 (Dense) (None, 256) 1048832 _________________________________________________________________ batch_normalization_6 (Batch (None, 256) 1024 _________________________________________________________________ dense_71 (Dense) (None, 512) 131584 _________________________________________________________________ batch_normalization_7 (Batch (None, 512) 2048 _________________________________________________________________ dense_72 (Dense) (None, 10) 5130 ================================================================= Total params: 7,394,826 Trainable params: 7,389,450 Non-trainable params: 5,376 _________________________________________________________________
history_11, model_11 = compile_train_model(model_11,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Epoch 1/200 - 88/88 - 8s 66ms/step - loss: 1.6984 - accuracy: 0.4247 - val_loss: 5.4169 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 5s 61ms/step - loss: 0.9923 - accuracy: 0.6502 - val_loss: 5.1678 - val_accuracy: 0.0970
Epoch 3/200 - 88/88 - 5s 62ms/step - loss: 0.6871 - accuracy: 0.7584 - val_loss: 4.3335 - val_accuracy: 0.1630
Epoch 4/200 - 88/88 - 5s 62ms/step - loss: 0.4714 - accuracy: 0.8340 - val_loss: 4.2017 - val_accuracy: 0.1208
Epoch 5/200 - 88/88 - 5s 62ms/step - loss: 0.2988 - accuracy: 0.8945 - val_loss: 4.6936 - val_accuracy: 0.1656
Epoch 6/200 - 88/88 - 5s 62ms/step - loss: 0.1777 - accuracy: 0.9380 - val_loss: 3.7097 - val_accuracy: 0.2516
Epoch 7/200 - 88/88 - 5s 62ms/step - loss: 0.1143 - accuracy: 0.9622 - val_loss: 1.3058 - val_accuracy: 0.6716
Epoch 8/200 - 88/88 - 6s 64ms/step - loss: 0.0855 - accuracy: 0.9708 - val_loss: 1.7205 - val_accuracy: 0.6740
Epoch 9/200 - 88/88 - 5s 62ms/step - loss: 0.0694 - accuracy: 0.9770 - val_loss: 1.0085 - val_accuracy: 0.7966
Epoch 10/200 - 88/88 - 5s 62ms/step - loss: 0.0566 - accuracy: 0.9810 - val_loss: 1.2147 - val_accuracy: 0.7432
Epoch 11/200 - 88/88 - 5s 62ms/step - loss: 0.0522 - accuracy: 0.9814 - val_loss: 1.1445 - val_accuracy: 0.7734
Epoch 12/200 - 88/88 - 5s 62ms/step - loss: 0.0471 - accuracy: 0.9839 - val_loss: 1.2287 - val_accuracy: 0.7708
Epoch 13/200 - 88/88 - 5s 62ms/step - loss: 0.0419 - accuracy: 0.9860 - val_loss: 1.0271 - val_accuracy: 0.8030
Epoch 14/200 - 88/88 - 5s 62ms/step - loss: 0.0388 - accuracy: 0.9869 - val_loss: 1.2937 - val_accuracy: 0.7646
Epoch 15/200 - 88/88 - 5s 62ms/step - loss: 0.0348 - accuracy: 0.9883 - val_loss: 1.5663 - val_accuracy: 0.7394
Epoch 16/200 - 88/88 - 5s 62ms/step - loss: 0.0351 - accuracy: 0.9882 - val_loss: 1.3151 - val_accuracy: 0.7760
Epoch 17/200 - 88/88 - 5s 62ms/step - loss: 0.0282 - accuracy: 0.9902 - val_loss: 1.2205 - val_accuracy: 0.7918
Epoch 18/200 - 88/88 - 5s 62ms/step - loss: 0.0305 - accuracy: 0.9897 - val_loss: 1.3152 - val_accuracy: 0.7690
Epoch 19/200 - 88/88 - 5s 62ms/step - loss: 0.0228 - accuracy: 0.9920 - val_loss: 1.1780 - val_accuracy: 0.7942
Epoch 20/200 - 88/88 - 5s 62ms/step - loss: 0.0267 - accuracy: 0.9910 - val_loss: 1.3953 - val_accuracy: 0.7638
Epoch 21/200 - 88/88 - 5s 62ms/step - loss: 0.0211 - accuracy: 0.9928 - val_loss: 1.2002 - val_accuracy: 0.7938
Epoch 22/200 - 88/88 - 5s 62ms/step - loss: 0.0234 - accuracy: 0.9921 - val_loss: 1.4061 - val_accuracy: 0.7810
Epoch 23/200 - 88/88 - 5s 62ms/step - loss: 0.0212 - accuracy: 0.9930 - val_loss: 1.3650 - val_accuracy: 0.7852
Finished model training in 130s
Model performance with training set
1407/1407 - 7s 5ms/step - loss: 0.0532 - accuracy: 0.9825
Evaluating model performance with validation set
157/157 - 1s 5ms/step - loss: 1.3650 - accuracy: 0.7852
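The helper compile_train_model is defined earlier in the notebook and not repeated here, but the logs constrain what it must do: 88 steps per epoch matches ceil(45,000 / 512), this run stops exactly 10 epochs after its best val_accuracy (epoch 13), every later run in this section follows the same pattern, and the final validation numbers match the last epoch rather than the best one. A minimal sketch consistent with those observations (the function name, loss choice, and default optimizer are assumptions, not the notebook's actual code):

import time
from tensorflow.keras.callbacks import EarlyStopping

def compile_train_model_sketch(model, x_train, y_train, x_valid, y_valid,
                               optimizer='RMSprop', epochs=200, batch_size=512):
    # Hypothetical stand-in for the notebook's compile_train_model helper.
    # batch_size=512 is inferred from the logs: ceil(45000 / 512) = 88 steps/epoch.
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',  # assumes integer labels
                  metrics=['accuracy'])
    # Each run stops 10 epochs after its best val_accuracy, and the final
    # report matches the *last* epoch, so restore_best_weights was
    # presumably left at its default (False).
    early_stop = EarlyStopping(monitor='val_accuracy', patience=10)
    start = time.time()
    history = model.fit(x_train, y_train,
                        validation_data=(x_valid, y_valid),
                        epochs=epochs, batch_size=batch_size,
                        callbacks=[early_stop])
    print(f"Finished model training in {time.time() - start:.0f}s")
    print("Model performance with training set")
    model.evaluate(x_train, y_train)
    print("Evaluating model performance with validation set")
    model.evaluate(x_valid, y_valid)
    return history, model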
plot_history(history_11)
pred11 = model_11.predict(x_test_norm)
pred11 = np.argmax(pred11, axis=1)
print_validation_report(y_test, pred11)
Classification Report
precision recall f1-score support
0 0.91 0.73 0.81 1000
1 0.90 0.88 0.89 1000
2 0.74 0.70 0.72 1000
3 0.70 0.59 0.64 1000
4 0.74 0.77 0.75 1000
5 0.72 0.69 0.70 1000
6 0.70 0.93 0.80 1000
7 0.87 0.80 0.83 1000
8 0.80 0.93 0.86 1000
9 0.85 0.87 0.86 1000
accuracy 0.79 10000
macro avg 0.79 0.79 0.79 10000
weighted avg 0.79 0.79 0.79 10000
Accuracy Score: 0.7879
Root Mean Square Error: 1.8829232591903473
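print_validation_report is likewise a helper defined earlier in the notebook. Judging from its output, it combines sklearn's classification_report and accuracy_score with a root-mean-square error computed on the raw label integers. A hypothetical sketch consistent with that output (exact wording and layout may differ):

import numpy as np
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import mean_squared_error as MSE

def print_validation_report_sketch(y_true, y_pred):
    # Hypothetical stand-in for the notebook's print_validation_report.
    print("Classification Report")
    print(classification_report(y_true, y_pred))
    print("Accuracy Score:", accuracy_score(y_true, y_pred))
    # RMSE over class indices treats the ten labels as ordinal quantities,
    # so it is only a rough error signal for a classification task.
    print("Root Mean Square Error:", np.sqrt(MSE(y_true, y_pred)))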
plot_confusion_matrix_labeled(y_test, pred11)
Experiment 12
model_12 = Sequential([
    # Block 1: 128 filters, 32x32 -> 16x16
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    # Block 2: 256 filters, 16x16 -> 8x8
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    # Block 3: 512 filters, 8x8 -> 4x4
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    # Block 4: 1024 filters, 4x4 -> 2x2
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    # Classifier head: 256 -> 512 units, then 10-way softmax
    Flatten(),
    Dense(units=256, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=512, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=10, activation=tf.nn.softmax)
])
model_12.summary()
Model: "sequential_41" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_147 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_147 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ dropout_8 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_8 (Batch (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_148 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_148 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ dropout_9 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_9 (Batch (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_149 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_149 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ dropout_10 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_10 (Batc (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_150 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_150 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_11 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_11 (Batc (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_35 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_73 (Dense) (None, 256) 1048832 _________________________________________________________________ batch_normalization_12 (Batc (None, 256) 1024 _________________________________________________________________ dropout_12 (Dropout) (None, 256) 0 _________________________________________________________________ dense_74 (Dense) (None, 512) 131584 _________________________________________________________________ batch_normalization_13 (Batc (None, 512) 2048 _________________________________________________________________ dropout_13 (Dropout) (None, 512) 0 _________________________________________________________________ dense_75 (Dense) (None, 10) 5130 ================================================================= Total params: 7,394,826 Trainable params: 7,389,450 Non-trainable params: 5,376 _________________________________________________________________
history_12, model_12 = compile_train_model(model_12,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split
)
Epoch 1/200 - 88/88 - 8s 70ms/step - loss: 1.9796 - accuracy: 0.3551 - val_loss: 9.3153 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 6s 66ms/step - loss: 1.3394 - accuracy: 0.5274 - val_loss: 8.9620 - val_accuracy: 0.0970
Epoch 3/200 - 88/88 - 6s 66ms/step - loss: 1.0462 - accuracy: 0.6299 - val_loss: 9.2782 - val_accuracy: 0.0970
Epoch 4/200 - 88/88 - 6s 66ms/step - loss: 0.8744 - accuracy: 0.6911 - val_loss: 6.2752 - val_accuracy: 0.0974
Epoch 5/200 - 88/88 - 6s 66ms/step - loss: 0.7443 - accuracy: 0.7406 - val_loss: 7.2151 - val_accuracy: 0.1052
Epoch 6/200 - 88/88 - 6s 66ms/step - loss: 0.6408 - accuracy: 0.7755 - val_loss: 6.1668 - val_accuracy: 0.1074
Epoch 7/200 - 88/88 - 6s 66ms/step - loss: 0.5554 - accuracy: 0.8066 - val_loss: 2.1501 - val_accuracy: 0.4460
Epoch 8/200 - 88/88 - 6s 66ms/step - loss: 0.4846 - accuracy: 0.8303 - val_loss: 1.2883 - val_accuracy: 0.6262
Epoch 9/200 - 88/88 - 6s 66ms/step - loss: 0.4267 - accuracy: 0.8516 - val_loss: 0.7710 - val_accuracy: 0.7554
Epoch 10/200 - 88/88 - 6s 66ms/step - loss: 0.3772 - accuracy: 0.8687 - val_loss: 0.7008 - val_accuracy: 0.7834
Epoch 11/200 - 88/88 - 6s 66ms/step - loss: 0.3269 - accuracy: 0.8846 - val_loss: 0.6887 - val_accuracy: 0.7956
Epoch 12/200 - 88/88 - 6s 66ms/step - loss: 0.2912 - accuracy: 0.8978 - val_loss: 0.7545 - val_accuracy: 0.7876
Epoch 13/200 - 88/88 - 6s 66ms/step - loss: 0.2610 - accuracy: 0.9074 - val_loss: 1.8041 - val_accuracy: 0.5976
Epoch 14/200 - 88/88 - 6s 66ms/step - loss: 0.2342 - accuracy: 0.9171 - val_loss: 0.6227 - val_accuracy: 0.8158
Epoch 15/200 - 88/88 - 6s 66ms/step - loss: 0.2153 - accuracy: 0.9234 - val_loss: 0.7388 - val_accuracy: 0.7980
Epoch 16/200 - 88/88 - 6s 66ms/step - loss: 0.1961 - accuracy: 0.9311 - val_loss: 0.6219 - val_accuracy: 0.8234
Epoch 17/200 - 88/88 - 6s 66ms/step - loss: 0.1797 - accuracy: 0.9369 - val_loss: 0.7981 - val_accuracy: 0.7924
Epoch 18/200 - 88/88 - 6s 66ms/step - loss: 0.1659 - accuracy: 0.9421 - val_loss: 0.6671 - val_accuracy: 0.8232
Epoch 19/200 - 88/88 - 6s 65ms/step - loss: 0.1595 - accuracy: 0.9443 - val_loss: 0.7273 - val_accuracy: 0.8150
Epoch 20/200 - 88/88 - 6s 66ms/step - loss: 0.1490 - accuracy: 0.9473 - val_loss: 0.6925 - val_accuracy: 0.8250
Epoch 21/200 - 88/88 - 6s 66ms/step - loss: 0.1356 - accuracy: 0.9530 - val_loss: 0.8459 - val_accuracy: 0.8018
Epoch 22/200 - 88/88 - 6s 66ms/step - loss: 0.1305 - accuracy: 0.9552 - val_loss: 0.7232 - val_accuracy: 0.8256
Epoch 23/200 - 88/88 - 6s 66ms/step - loss: 0.1268 - accuracy: 0.9558 - val_loss: 0.6577 - val_accuracy: 0.8296
Epoch 24/200 - 88/88 - 6s 66ms/step - loss: 0.1196 - accuracy: 0.9580 - val_loss: 0.6874 - val_accuracy: 0.8232
Epoch 25/200 - 88/88 - 6s 66ms/step - loss: 0.1098 - accuracy: 0.9618 - val_loss: 1.5282 - val_accuracy: 0.7072
Epoch 26/200 - 88/88 - 6s 66ms/step - loss: 0.1087 - accuracy: 0.9619 - val_loss: 0.7603 - val_accuracy: 0.8120
Epoch 27/200 - 88/88 - 6s 66ms/step - loss: 0.1029 - accuracy: 0.9643 - val_loss: 0.9850 - val_accuracy: 0.7756
Epoch 28/200 - 88/88 - 6s 66ms/step - loss: 0.1026 - accuracy: 0.9642 - val_loss: 0.8325 - val_accuracy: 0.8004
Epoch 29/200 - 88/88 - 6s 66ms/step - loss: 0.0945 - accuracy: 0.9668 - val_loss: 0.7553 - val_accuracy: 0.8208
Epoch 30/200 - 88/88 - 6s 66ms/step - loss: 0.0908 - accuracy: 0.9679 - val_loss: 1.0063 - val_accuracy: 0.7846
Epoch 31/200 - 88/88 - 6s 66ms/step - loss: 0.0900 - accuracy: 0.9693 - val_loss: 0.7322 - val_accuracy: 0.8236
Epoch 32/200 - 88/88 - 6s 66ms/step - loss: 0.0840 - accuracy: 0.9710 - val_loss: 0.8683 - val_accuracy: 0.8048
Epoch 33/200 - 88/88 - 6s 66ms/step - loss: 0.0834 - accuracy: 0.9711 - val_loss: 0.7404 - val_accuracy: 0.8280
Finished model training in 197s
Model performance with training set
1407/1407 - 7s 5ms/step - loss: 0.0106 - accuracy: 0.9971
Evaluating model performance with validation set
157/157 - 1s 5ms/step - loss: 0.7404 - accuracy: 0.8280
plot_history(history_12)
pred12 = model_12.predict(x_test_norm)
pred12 = np.argmax(pred12, axis=1)
print_validation_report(y_test, pred12)
plot_confusion_matrix_labeled(y_test, pred12)
Experiment 13
model_13 = Sequential([
    # Same architecture as model_12; only the optimizer in the training call below changes.
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Flatten(),
    Dense(units=256, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=512, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=10, activation=tf.nn.softmax)
])
model_13.summary()
Model: "sequential_42" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_151 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_151 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ dropout_14 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_14 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_152 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_152 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_15 (Batc (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_153 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_153 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ dropout_16 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_16 (Batc (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_154 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_154 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_17 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_17 (Batc (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_36 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_76 (Dense) (None, 256) 1048832 _________________________________________________________________ batch_normalization_18 (Batc (None, 256) 1024 _________________________________________________________________ dropout_18 (Dropout) (None, 256) 0 _________________________________________________________________ dense_77 (Dense) (None, 512) 131584 _________________________________________________________________ batch_normalization_19 (Batc (None, 512) 2048 _________________________________________________________________ dropout_19 (Dropout) (None, 512) 0 _________________________________________________________________ dense_78 (Dense) (None, 10) 5130 ================================================================= Total params: 7,394,826 Trainable params: 7,389,450 Non-trainable params: 5,376 _________________________________________________________________
history_13, model_13 = compile_train_model(model_13,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split,
optimizer='Adam'
)
Epoch 1/200 - 88/88 - 7s 68ms/step - loss: 1.9291 - accuracy: 0.3521 - val_loss: 7.9924 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 6s 64ms/step - loss: 1.3764 - accuracy: 0.5106 - val_loss: 12.0159 - val_accuracy: 0.0970
Epoch 3/200 - 88/88 - 6s 64ms/step - loss: 1.1239 - accuracy: 0.5972 - val_loss: 10.7945 - val_accuracy: 0.0970
Epoch 4/200 - 88/88 - 6s 64ms/step - loss: 0.9594 - accuracy: 0.6616 - val_loss: 7.1390 - val_accuracy: 0.1118
Epoch 5/200 - 88/88 - 6s 64ms/step - loss: 0.8338 - accuracy: 0.7053 - val_loss: 8.0210 - val_accuracy: 0.0970
Epoch 6/200 - 88/88 - 6s 65ms/step - loss: 0.7473 - accuracy: 0.7388 - val_loss: 3.9600 - val_accuracy: 0.2080
Epoch 7/200 - 88/88 - 6s 64ms/step - loss: 0.6776 - accuracy: 0.7634 - val_loss: 2.9354 - val_accuracy: 0.3074
Epoch 8/200 - 88/88 - 6s 64ms/step - loss: 0.6118 - accuracy: 0.7851 - val_loss: 2.0672 - val_accuracy: 0.4658
Epoch 9/200 - 88/88 - 6s 65ms/step - loss: 0.5543 - accuracy: 0.8055 - val_loss: 0.8429 - val_accuracy: 0.7348
Epoch 10/200 - 88/88 - 6s 65ms/step - loss: 0.4928 - accuracy: 0.8278 - val_loss: 0.8523 - val_accuracy: 0.7362
Epoch 11/200 - 88/88 - 6s 64ms/step - loss: 0.4498 - accuracy: 0.8432 - val_loss: 0.6554 - val_accuracy: 0.7860
Epoch 12/200 - 88/88 - 6s 65ms/step - loss: 0.4125 - accuracy: 0.8556 - val_loss: 0.5407 - val_accuracy: 0.8246
Epoch 13/200 - 88/88 - 6s 65ms/step - loss: 0.3703 - accuracy: 0.8685 - val_loss: 0.5444 - val_accuracy: 0.8210
Epoch 14/200 - 88/88 - 6s 65ms/step - loss: 0.3282 - accuracy: 0.8841 - val_loss: 0.6715 - val_accuracy: 0.7926
Epoch 15/200 - 88/88 - 6s 64ms/step - loss: 0.3028 - accuracy: 0.8933 - val_loss: 0.6738 - val_accuracy: 0.8026
Epoch 16/200 - 88/88 - 6s 64ms/step - loss: 0.2738 - accuracy: 0.9038 - val_loss: 0.6072 - val_accuracy: 0.8238
Epoch 17/200 - 88/88 - 6s 64ms/step - loss: 0.2483 - accuracy: 0.9118 - val_loss: 0.6835 - val_accuracy: 0.8122
Epoch 18/200 - 88/88 - 6s 65ms/step - loss: 0.2286 - accuracy: 0.9195 - val_loss: 0.5738 - val_accuracy: 0.8258
Epoch 19/200 - 88/88 - 6s 64ms/step - loss: 0.2107 - accuracy: 0.9262 - val_loss: 0.7555 - val_accuracy: 0.7918
Epoch 20/200 - 88/88 - 6s 65ms/step - loss: 0.1989 - accuracy: 0.9304 - val_loss: 0.6152 - val_accuracy: 0.8292
Epoch 21/200 - 88/88 - 6s 64ms/step - loss: 0.1862 - accuracy: 0.9344 - val_loss: 0.6778 - val_accuracy: 0.8180
Epoch 22/200 - 88/88 - 6s 65ms/step - loss: 0.1675 - accuracy: 0.9417 - val_loss: 0.6357 - val_accuracy: 0.8276
Epoch 23/200 - 88/88 - 6s 65ms/step - loss: 0.1570 - accuracy: 0.9449 - val_loss: 0.8655 - val_accuracy: 0.7890
Epoch 24/200 - 88/88 - 6s 64ms/step - loss: 0.1479 - accuracy: 0.9481 - val_loss: 0.7144 - val_accuracy: 0.8174
Epoch 25/200 - 88/88 - 6s 64ms/step - loss: 0.1388 - accuracy: 0.9508 - val_loss: 0.7652 - val_accuracy: 0.8122
Epoch 26/200 - 88/88 - 6s 65ms/step - loss: 0.1244 - accuracy: 0.9560 - val_loss: 0.7227 - val_accuracy: 0.8190
Epoch 27/200 - 88/88 - 6s 64ms/step - loss: 0.1231 - accuracy: 0.9571 - val_loss: 0.6677 - val_accuracy: 0.8302
Epoch 28/200 - 88/88 - 6s 65ms/step - loss: 0.1197 - accuracy: 0.9580 - val_loss: 0.6502 - val_accuracy: 0.8376
Epoch 29/200 - 88/88 - 6s 64ms/step - loss: 0.1107 - accuracy: 0.9610 - val_loss: 0.6505 - val_accuracy: 0.8388
Epoch 30/200 - 88/88 - 6s 65ms/step - loss: 0.1042 - accuracy: 0.9624 - val_loss: 0.6685 - val_accuracy: 0.8370
Epoch 31/200 - 88/88 - 6s 65ms/step - loss: 0.1004 - accuracy: 0.9641 - val_loss: 0.7176 - val_accuracy: 0.8290
Epoch 32/200 - 88/88 - 6s 64ms/step - loss: 0.1000 - accuracy: 0.9651 - val_loss: 0.6537 - val_accuracy: 0.8482
Epoch 33/200 - 88/88 - 6s 65ms/step - loss: 0.0900 - accuracy: 0.9683 - val_loss: 0.6302 - val_accuracy: 0.8480
Epoch 34/200 - 88/88 - 6s 65ms/step - loss: 0.0937 - accuracy: 0.9667 - val_loss: 0.7394 - val_accuracy: 0.8264
Epoch 35/200 - 88/88 - 6s 65ms/step - loss: 0.0900 - accuracy: 0.9695 - val_loss: 0.6937 - val_accuracy: 0.8386
Epoch 36/200 - 88/88 - 6s 65ms/step - loss: 0.0804 - accuracy: 0.9722 - val_loss: 0.6475 - val_accuracy: 0.8490
Epoch 37/200 - 88/88 - 6s 65ms/step - loss: 0.0836 - accuracy: 0.9712 - val_loss: 0.7225 - val_accuracy: 0.8384
Epoch 38/200 - 88/88 - 6s 65ms/step - loss: 0.0850 - accuracy: 0.9716 - val_loss: 0.7580 - val_accuracy: 0.8262
Epoch 39/200 - 88/88 - 6s 65ms/step - loss: 0.0772 - accuracy: 0.9741 - val_loss: 0.6601 - val_accuracy: 0.8410
Epoch 40/200 - 88/88 - 6s 64ms/step - loss: 0.0739 - accuracy: 0.9746 - val_loss: 0.6776 - val_accuracy: 0.8534
Epoch 41/200 - 88/88 - 6s 65ms/step - loss: 0.0759 - accuracy: 0.9728 - val_loss: 0.8401 - val_accuracy: 0.8156
Epoch 42/200 - 88/88 - 6s 64ms/step - loss: 0.0773 - accuracy: 0.9732 - val_loss: 0.6580 - val_accuracy: 0.8472
Epoch 43/200 - 88/88 - 6s 64ms/step - loss: 0.0726 - accuracy: 0.9744 - val_loss: 0.7424 - val_accuracy: 0.8352
Epoch 44/200 - 88/88 - 6s 64ms/step - loss: 0.0717 - accuracy: 0.9748 - val_loss: 0.7733 - val_accuracy: 0.8274
Epoch 45/200 - 88/88 - 6s 65ms/step - loss: 0.0691 - accuracy: 0.9765 - val_loss: 0.8761 - val_accuracy: 0.8104
Epoch 46/200 - 88/88 - 6s 64ms/step - loss: 0.0719 - accuracy: 0.9750 - val_loss: 0.7222 - val_accuracy: 0.8362
Epoch 47/200 - 88/88 - 6s 64ms/step - loss: 0.0667 - accuracy: 0.9764 - val_loss: 0.6815 - val_accuracy: 0.8430
Epoch 48/200 - 88/88 - 6s 64ms/step - loss: 0.0657 - accuracy: 0.9782 - val_loss: 0.7157 - val_accuracy: 0.8372
Epoch 49/200 - 88/88 - 6s 64ms/step - loss: 0.0614 - accuracy: 0.9793 - val_loss: 0.7364 - val_accuracy: 0.8370
Epoch 50/200 - 88/88 - 6s 64ms/step - loss: 0.0663 - accuracy: 0.9774 - val_loss: 0.7134 - val_accuracy: 0.8418
Finished model training in 290s
Model performance with training set
1407/1407 - 7s 5ms/step - loss: 0.0065 - accuracy: 0.9982
Evaluating model performance with validation set
157/157 - 1s 5ms/step - loss: 0.7134 - accuracy: 0.8418
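The only change from Experiment 12 is the explicit optimizer='Adam' argument (whatever default compile_train_model uses is defined earlier in the notebook and not visible here). When Keras receives a string identifier, it resolves it to an optimizer instance with stock hyperparameters:

import tensorflow as tf

# 'Adam' resolves to tf.keras.optimizers.Adam with its defaults
# (learning_rate=0.001, beta_1=0.9, beta_2=0.999).
opt = tf.keras.optimizers.get('Adam')
print(type(opt).__name__, opt.get_config()['learning_rate'])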
plot_history(history_13)
pred13 = model_13.predict(x_test_norm)
pred13 = np.argmax(pred13, axis=1)
print_validation_report(y_test, pred13)
Classification Report
precision recall f1-score support
0 0.83 0.89 0.86 1000
1 0.96 0.87 0.91 1000
2 0.84 0.69 0.76 1000
3 0.71 0.65 0.68 1000
4 0.70 0.90 0.79 1000
5 0.81 0.68 0.74 1000
6 0.85 0.90 0.87 1000
7 0.86 0.90 0.88 1000
8 0.89 0.93 0.91 1000
9 0.88 0.91 0.89 1000
accuracy 0.83 10000
macro avg 0.83 0.83 0.83 10000
weighted avg 0.83 0.83 0.83 10000
Accuracy Score: 0.8309
Root Mean Square Error: 1.6363984844774209
plot_confusion_matrix_labeled(y_test, pred13)
Experiment 14
model_14 = Sequential([
    # Convolutional backbone unchanged from model_13.
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.3),
    BatchNormalization(),
    # Wider classifier head: three 1024-unit layers instead of 256 -> 512.
    Flatten(),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=10, activation=tf.nn.softmax)
])
model_14.summary()
Model: "sequential_46" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_167 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_167 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ dropout_38 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_41 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_168 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_168 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ dropout_39 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_42 (Batc (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_169 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_169 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ dropout_40 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_43 (Batc (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_170 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_170 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_41 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_44 (Batc (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_40 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_91 (Dense) (None, 1024) 4195328 _________________________________________________________________ batch_normalization_45 (Batc (None, 1024) 4096 _________________________________________________________________ dropout_42 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_92 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_46 (Batc (None, 1024) 4096 _________________________________________________________________ dropout_43 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_93 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_47 (Batc (None, 1024) 4096 _________________________________________________________________ dropout_44 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_94 (Dense) (None, 10) 10250 ================================================================= Total params: 12,523,274 Trainable params: 12,513,290 Non-trainable params: 9,984 _________________________________________________________________
history_14, model_14 = compile_train_model(model_14,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split,
optimizer='Adam'
)
Epoch 1/200 - 88/88 - 8s 72ms/step - loss: 2.0226 - accuracy: 0.3397 - val_loss: 13.5347 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 6s 68ms/step - loss: 1.4229 - accuracy: 0.4970 - val_loss: 17.0798 - val_accuracy: 0.0970
Epoch 3/200 - 88/88 - 6s 68ms/step - loss: 1.1841 - accuracy: 0.5774 - val_loss: 15.1160 - val_accuracy: 0.0970
Epoch 4/200 - 88/88 - 6s 68ms/step - loss: 1.0237 - accuracy: 0.6354 - val_loss: 11.4714 - val_accuracy: 0.0970
Epoch 5/200 - 88/88 - 6s 68ms/step - loss: 0.8900 - accuracy: 0.6850 - val_loss: 7.8868 - val_accuracy: 0.0974
Epoch 6/200 - 88/88 - 6s 68ms/step - loss: 0.7970 - accuracy: 0.7187 - val_loss: 6.3553 - val_accuracy: 0.1164
Epoch 7/200 - 88/88 - 6s 68ms/step - loss: 0.7021 - accuracy: 0.7534 - val_loss: 2.5714 - val_accuracy: 0.4180
Epoch 8/200 - 88/88 - 6s 68ms/step - loss: 0.6416 - accuracy: 0.7741 - val_loss: 2.3919 - val_accuracy: 0.4506
Epoch 9/200 - 88/88 - 6s 68ms/step - loss: 0.5775 - accuracy: 0.7962 - val_loss: 0.7944 - val_accuracy: 0.7418
Epoch 10/200 - 88/88 - 6s 68ms/step - loss: 0.5239 - accuracy: 0.8145 - val_loss: 0.6877 - val_accuracy: 0.7760
Epoch 11/200 - 88/88 - 6s 68ms/step - loss: 0.4727 - accuracy: 0.8343 - val_loss: 0.6976 - val_accuracy: 0.7706
Epoch 12/200 - 88/88 - 6s 68ms/step - loss: 0.4244 - accuracy: 0.8504 - val_loss: 0.6114 - val_accuracy: 0.8034
Epoch 13/200 - 88/88 - 6s 67ms/step - loss: 0.3836 - accuracy: 0.8646 - val_loss: 0.5855 - val_accuracy: 0.8148
Epoch 14/200 - 88/88 - 6s 68ms/step - loss: 0.3455 - accuracy: 0.8781 - val_loss: 0.6241 - val_accuracy: 0.8046
Epoch 15/200 - 88/88 - 6s 68ms/step - loss: 0.3190 - accuracy: 0.8871 - val_loss: 0.7310 - val_accuracy: 0.7908
Epoch 16/200 - 88/88 - 6s 67ms/step - loss: 0.2918 - accuracy: 0.8947 - val_loss: 0.5528 - val_accuracy: 0.8338
Epoch 17/200 - 88/88 - 6s 68ms/step - loss: 0.2610 - accuracy: 0.9079 - val_loss: 0.8857 - val_accuracy: 0.7624
Epoch 18/200 - 88/88 - 6s 67ms/step - loss: 0.2434 - accuracy: 0.9137 - val_loss: 0.6541 - val_accuracy: 0.8192
Epoch 19/200 - 88/88 - 6s 67ms/step - loss: 0.2177 - accuracy: 0.9228 - val_loss: 0.7728 - val_accuracy: 0.7974
Epoch 20/200 - 88/88 - 6s 67ms/step - loss: 0.1987 - accuracy: 0.9294 - val_loss: 0.6503 - val_accuracy: 0.8224
Epoch 21/200 - 88/88 - 6s 67ms/step - loss: 0.1854 - accuracy: 0.9347 - val_loss: 0.6189 - val_accuracy: 0.8292
Epoch 22/200 - 88/88 - 6s 67ms/step - loss: 0.1712 - accuracy: 0.9378 - val_loss: 0.6311 - val_accuracy: 0.8264
Epoch 23/200 - 88/88 - 6s 67ms/step - loss: 0.1585 - accuracy: 0.9440 - val_loss: 0.6749 - val_accuracy: 0.8288
Epoch 24/200 - 88/88 - 6s 67ms/step - loss: 0.1455 - accuracy: 0.9490 - val_loss: 0.6623 - val_accuracy: 0.8302
Epoch 25/200 - 88/88 - 6s 67ms/step - loss: 0.1423 - accuracy: 0.9495 - val_loss: 0.6461 - val_accuracy: 0.8368
Epoch 26/200 - 88/88 - 6s 67ms/step - loss: 0.1307 - accuracy: 0.9538 - val_loss: 0.6685 - val_accuracy: 0.8302
Epoch 27/200 - 88/88 - 6s 67ms/step - loss: 0.1237 - accuracy: 0.9567 - val_loss: 0.6852 - val_accuracy: 0.8282
Epoch 28/200 - 88/88 - 6s 67ms/step - loss: 0.1152 - accuracy: 0.9605 - val_loss: 0.6637 - val_accuracy: 0.8352
Epoch 29/200 - 88/88 - 6s 67ms/step - loss: 0.1108 - accuracy: 0.9608 - val_loss: 0.6661 - val_accuracy: 0.8386
Epoch 30/200 - 88/88 - 6s 67ms/step - loss: 0.1027 - accuracy: 0.9643 - val_loss: 0.8242 - val_accuracy: 0.8130
Epoch 31/200 - 88/88 - 6s 67ms/step - loss: 0.1028 - accuracy: 0.9633 - val_loss: 0.7744 - val_accuracy: 0.8276
Epoch 32/200 - 88/88 - 6s 67ms/step - loss: 0.0977 - accuracy: 0.9652 - val_loss: 0.6539 - val_accuracy: 0.8464
Epoch 33/200 - 88/88 - 6s 67ms/step - loss: 0.1021 - accuracy: 0.9655 - val_loss: 0.6189 - val_accuracy: 0.8472
Epoch 34/200 - 88/88 - 6s 68ms/step - loss: 0.0930 - accuracy: 0.9678 - val_loss: 0.6672 - val_accuracy: 0.8394
Epoch 35/200 - 88/88 - 6s 67ms/step - loss: 0.0861 - accuracy: 0.9696 - val_loss: 0.7004 - val_accuracy: 0.8442
Epoch 36/200 - 88/88 - 6s 67ms/step - loss: 0.0875 - accuracy: 0.9689 - val_loss: 0.6990 - val_accuracy: 0.8390
Epoch 37/200 - 88/88 - 6s 67ms/step - loss: 0.0837 - accuracy: 0.9708 - val_loss: 0.7802 - val_accuracy: 0.8314
Epoch 38/200 - 88/88 - 6s 67ms/step - loss: 0.0858 - accuracy: 0.9699 - val_loss: 0.6640 - val_accuracy: 0.8418
Epoch 39/200 - 88/88 - 6s 67ms/step - loss: 0.0793 - accuracy: 0.9740 - val_loss: 0.6579 - val_accuracy: 0.8466
Epoch 40/200 - 88/88 - 6s 67ms/step - loss: 0.0764 - accuracy: 0.9730 - val_loss: 0.7346 - val_accuracy: 0.8360
Epoch 41/200 - 88/88 - 6s 67ms/step - loss: 0.0747 - accuracy: 0.9750 - val_loss: 0.7120 - val_accuracy: 0.8410
Epoch 42/200 - 88/88 - 6s 67ms/step - loss: 0.0750 - accuracy: 0.9745 - val_loss: 0.7472 - val_accuracy: 0.8352
Epoch 43/200 - 88/88 - 6s 67ms/step - loss: 0.0722 - accuracy: 0.9751 - val_loss: 0.6898 - val_accuracy: 0.8430
Finished model training in 263s
Model performance with training set
1407/1407 - 7s 5ms/step - loss: 0.0098 - accuracy: 0.9971
Evaluating model performance with validation set
157/157 - 1s 5ms/step - loss: 0.6898 - accuracy: 0.8430
plot_history(history_14)
pred14 = model_14.predict(x_test_norm)
pred14 = np.argmax(pred14, axis=1)
print_validation_report(y_test, pred14)
Classification Report
precision recall f1-score support
0 0.86 0.84 0.85 1000
1 0.91 0.93 0.92 1000
2 0.77 0.76 0.77 1000
3 0.67 0.68 0.67 1000
4 0.87 0.70 0.78 1000
5 0.68 0.81 0.74 1000
6 0.82 0.91 0.86 1000
7 0.89 0.84 0.86 1000
8 0.94 0.88 0.91 1000
9 0.89 0.90 0.90 1000
accuracy 0.82 10000
macro avg 0.83 0.82 0.83 10000
weighted avg 0.83 0.82 0.83 10000
Accuracy Score: 0.8248
Root Mean Square Error: 1.6192282112166896
plot_confusion_matrix_labeled(y_test, pred14)
Experiment 15
model_15 = Sequential([
    # Dropout in the conv blocks raised from 0.3 to 0.5.
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    # Dense head: dropout replaced by L2 weight decay (lambda = 0.001).
    Flatten(),
    Dense(units=1024, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu, kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=10, activation=tf.nn.softmax)
])
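Relative to Experiment 14, the dense head trades dropout for L2 weight decay: each regularized layer adds 0.001·Σw² to the training objective, and Keras collects these penalties in model.losses. That is why the loss reported in the run below starts far higher (5.54 vs. roughly 2.0) and ends far higher (0.52 at 93% training accuracy) than the bare cross-entropy of the earlier runs. A toy check of the mechanism (the layer here is hypothetical, not from the notebook):

import tensorflow as tf

# The L2 penalty collected in layer.losses equals 0.001 * sum(kernel ** 2).
layer = tf.keras.layers.Dense(4, kernel_regularizer=tf.keras.regularizers.L2(0.001))
_ = layer(tf.ones((1, 3)))  # build the layer so the kernel and losses exist
expected = 0.001 * tf.reduce_sum(tf.square(layer.kernel))
assert abs(float(layer.losses[0]) - float(expected)) < 1e-7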
model_15.summary()
Model: "sequential_48" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_175 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_175 (MaxPoolin (None, 16, 16, 128) 0 _________________________________________________________________ dropout_49 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_55 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_176 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ max_pooling2d_176 (MaxPoolin (None, 8, 8, 256) 0 _________________________________________________________________ dropout_50 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_56 (Batc (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_177 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ max_pooling2d_177 (MaxPoolin (None, 4, 4, 512) 0 _________________________________________________________________ dropout_51 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_57 (Batc (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_178 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ max_pooling2d_178 (MaxPoolin (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_52 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_58 (Batc (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_42 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_99 (Dense) (None, 1024) 4195328 _________________________________________________________________ batch_normalization_59 (Batc (None, 1024) 4096 _________________________________________________________________ dense_100 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_60 (Batc (None, 1024) 4096 _________________________________________________________________ dense_101 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_61 (Batc (None, 1024) 4096 _________________________________________________________________ dense_102 (Dense) (None, 10) 10250 ================================================================= Total params: 12,523,274 Trainable params: 12,513,290 Non-trainable params: 9,984 _________________________________________________________________
history_15, model_15 = compile_train_model(model_15,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split,
optimizer='Adam'
)
Epoch 1/200 - 88/88 - 8s 72ms/step - loss: 5.5420 - accuracy: 0.3295 - val_loss: 9.4948 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 6s 67ms/step - loss: 4.2763 - accuracy: 0.4639 - val_loss: 8.5190 - val_accuracy: 0.0970
Epoch 3/200 - 88/88 - 6s 67ms/step - loss: 3.2684 - accuracy: 0.5218 - val_loss: 9.3390 - val_accuracy: 0.0970
Epoch 4/200 - 88/88 - 6s 67ms/step - loss: 2.4937 - accuracy: 0.5657 - val_loss: 11.1612 - val_accuracy: 0.0970
Epoch 5/200 - 88/88 - 6s 67ms/step - loss: 1.9462 - accuracy: 0.6041 - val_loss: 7.1761 - val_accuracy: 0.1028
Epoch 6/200 - 88/88 - 6s 67ms/step - loss: 1.5859 - accuracy: 0.6362 - val_loss: 8.4402 - val_accuracy: 0.0970
Epoch 7/200 - 88/88 - 6s 67ms/step - loss: 1.3662 - accuracy: 0.6590 - val_loss: 4.0182 - val_accuracy: 0.1772
Epoch 8/200 - 88/88 - 6s 67ms/step - loss: 1.2202 - accuracy: 0.6795 - val_loss: 1.7736 - val_accuracy: 0.5208
Epoch 9/200 - 88/88 - 6s 68ms/step - loss: 1.1203 - accuracy: 0.7008 - val_loss: 2.3827 - val_accuracy: 0.4176
Epoch 10/200 - 88/88 - 6s 67ms/step - loss: 1.0510 - accuracy: 0.7206 - val_loss: 1.2111 - val_accuracy: 0.6654
Epoch 11/200 - 88/88 - 6s 67ms/step - loss: 1.0071 - accuracy: 0.7348 - val_loss: 1.2972 - val_accuracy: 0.6528
Epoch 12/200 - 88/88 - 6s 67ms/step - loss: 0.9801 - accuracy: 0.7422 - val_loss: 1.2838 - val_accuracy: 0.6754
Epoch 13/200 - 88/88 - 6s 68ms/step - loss: 0.9567 - accuracy: 0.7545 - val_loss: 1.0335 - val_accuracy: 0.7340
Epoch 14/200 - 88/88 - 6s 68ms/step - loss: 0.9430 - accuracy: 0.7600 - val_loss: 1.0043 - val_accuracy: 0.7428
Epoch 15/200 - 88/88 - 6s 68ms/step - loss: 0.9286 - accuracy: 0.7689 - val_loss: 1.0656 - val_accuracy: 0.7264
Epoch 16/200 - 88/88 - 6s 68ms/step - loss: 0.9072 - accuracy: 0.7780 - val_loss: 0.9170 - val_accuracy: 0.7810
Epoch 17/200 - 88/88 - 6s 67ms/step - loss: 0.9093 - accuracy: 0.7790 - val_loss: 0.9678 - val_accuracy: 0.7588
Epoch 18/200 - 88/88 - 6s 68ms/step - loss: 0.8875 - accuracy: 0.7869 - val_loss: 0.9208 - val_accuracy: 0.7832
Epoch 19/200 - 88/88 - 6s 67ms/step - loss: 0.8773 - accuracy: 0.7942 - val_loss: 0.8933 - val_accuracy: 0.7948
Epoch 20/200 - 88/88 - 6s 68ms/step - loss: 0.8744 - accuracy: 0.7977 - val_loss: 1.0627 - val_accuracy: 0.7456
Epoch 21/200 - 88/88 - 6s 67ms/step - loss: 0.8570 - accuracy: 0.8035 - val_loss: 0.8842 - val_accuracy: 0.8010
Epoch 22/200 - 88/88 - 6s 67ms/step - loss: 0.8488 - accuracy: 0.8096 - val_loss: 1.0633 - val_accuracy: 0.7426
Epoch 23/200 - 88/88 - 6s 67ms/step - loss: 0.8411 - accuracy: 0.8137 - val_loss: 0.8368 - val_accuracy: 0.8162
Epoch 24/200 - 88/88 - 6s 67ms/step - loss: 0.8379 - accuracy: 0.8160 - val_loss: 0.8839 - val_accuracy: 0.8024
Epoch 25/200 - 88/88 - 6s 67ms/step - loss: 0.8237 - accuracy: 0.8203 - val_loss: 1.4477 - val_accuracy: 0.6734
Epoch 26/200 - 88/88 - 6s 67ms/step - loss: 0.8170 - accuracy: 0.8231 - val_loss: 1.0597 - val_accuracy: 0.7508
Epoch 27/200 - 88/88 - 6s 67ms/step - loss: 0.7963 - accuracy: 0.8308 - val_loss: 0.8886 - val_accuracy: 0.8038
Epoch 28/200 - 88/88 - 6s 67ms/step - loss: 0.7859 - accuracy: 0.8331 - val_loss: 0.8343 - val_accuracy: 0.8264
Epoch 29/200 - 88/88 - 6s 67ms/step - loss: 0.7838 - accuracy: 0.8344 - val_loss: 0.8714 - val_accuracy: 0.8172
Epoch 30/200 - 88/88 - 6s 67ms/step - loss: 0.7633 - accuracy: 0.8377 - val_loss: 0.9678 - val_accuracy: 0.7794
Epoch 31/200 - 88/88 - 6s 67ms/step - loss: 0.7586 - accuracy: 0.8408 - val_loss: 0.8553 - val_accuracy: 0.8222
Epoch 32/200 - 88/88 - 6s 67ms/step - loss: 0.7660 - accuracy: 0.8402 - val_loss: 0.9795 - val_accuracy: 0.7756
Epoch 33/200 - 88/88 - 6s 67ms/step - loss: 0.7451 - accuracy: 0.8446 - val_loss: 0.8618 - val_accuracy: 0.8138
Epoch 34/200 - 88/88 - 6s 68ms/step - loss: 0.7368 - accuracy: 0.8476 - val_loss: 0.8187 - val_accuracy: 0.8230
Epoch 35/200 - 88/88 - 6s 67ms/step - loss: 0.7372 - accuracy: 0.8493 - val_loss: 0.8065 - val_accuracy: 0.8258
Epoch 36/200 - 88/88 - 6s 67ms/step - loss: 0.7274 - accuracy: 0.8521 - val_loss: 0.8681 - val_accuracy: 0.8246
Epoch 37/200 - 88/88 - 6s 67ms/step - loss: 0.7280 - accuracy: 0.8543 - val_loss: 0.8592 - val_accuracy: 0.8124
Epoch 38/200 - 88/88 - 6s 67ms/step - loss: 0.7031 - accuracy: 0.8571 - val_loss: 0.8104 - val_accuracy: 0.8256
Finished model training in 233s
Model performance with training set
1407/1407 - 7s 5ms/step - loss: 0.5176 - accuracy: 0.9285
Evaluating model performance with validation set
157/157 - 1s 5ms/step - loss: 0.8104 - accuracy: 0.8256
plot_history(history_15)
pred15 = model_15.predict(x_test_norm)
pred15 = np.argmax(pred15, axis=1)
print_validation_report(y_test, pred15)
Classification Report
precision recall f1-score support
0 0.87 0.83 0.85 1000
1 0.97 0.83 0.90 1000
2 0.88 0.63 0.74 1000
3 0.69 0.66 0.68 1000
4 0.66 0.89 0.76 1000
5 0.75 0.76 0.76 1000
6 0.78 0.93 0.85 1000
7 0.93 0.80 0.86 1000
8 0.89 0.92 0.90 1000
9 0.85 0.92 0.88 1000
accuracy 0.82 10000
macro avg 0.83 0.82 0.82 10000
weighted avg 0.83 0.82 0.82 10000
Accuracy Score: 0.8167
Root Mean Square Error: 1.730664612222715
plot_confusion_matrix_labeled(y_test, pred15)
Experiment 16
model_16 = Sequential([
    # Block 1: single conv, as before.
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    # Blocks 2-4: three stacked 3x3 convs per block (VGG-style) before each pool.
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    # Dense head: three 1024-unit layers with 0.5 dropout.
    Flatten(),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.5),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.5),
    Dense(units=1024, activation=tf.nn.relu),
    BatchNormalization(),
    Dropout(0.5),
    Dense(units=10, activation=tf.nn.softmax)
])
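Experiment 16 keeps the 0.5 dropout and stacks three 3×3 convolutions per block before each pool, in the style of VGG. Two stacked 3×3 layers see a 5×5 window and three see a 7×7 window, so a triple of 3×3 convolutions matches the receptive field of a single 7×7 convolution while inserting two extra ReLU non-linearities and, at equal channel width, using roughly 45% fewer weights:

c = 512                           # channel width of the third block
three_3x3 = 3 * (3 * 3 * c * c)   # 7,077,888 weights (ignoring biases)
one_7x7 = 7 * 7 * c * c           # 12,845,056 weights
assert three_3x3 < one_7x7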
model_16.summary()
Model: "sequential_14" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_11 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_11 (MaxPooling (None, 16, 16, 128) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_33 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_12 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_13 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_14 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ max_pooling2d_12 (MaxPooling (None, 8, 8, 256) 0 _________________________________________________________________ dropout_16 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_34 (Batc (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_15 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ conv2d_16 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_17 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ max_pooling2d_13 (MaxPooling (None, 4, 4, 512) 0 _________________________________________________________________ dropout_17 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_35 (Batc (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_18 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ conv2d_19 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ conv2d_20 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ max_pooling2d_14 (MaxPooling (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_18 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_36 (Batc (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_6 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_46 (Dense) (None, 1024) 4195328 _________________________________________________________________ batch_normalization_37 (Batc (None, 1024) 4096 _________________________________________________________________ dropout_19 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_47 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_38 (Batc (None, 1024) 4096 _________________________________________________________________ dropout_20 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_48 (Dense) (None, 1024) 1049600 _________________________________________________________________ batch_normalization_39 (Batc (None, 
1024) 4096 _________________________________________________________________ dropout_21 (Dropout) (None, 1024) 0 _________________________________________________________________ dense_49 (Dense) (None, 10) 10250 ================================================================= Total params: 37,299,466 Trainable params: 37,289,482 Non-trainable params: 9,984 _________________________________________________________________
history_16, model_16 = compile_train_model(model_16,
x_train_norm, y_train_split,
x_valid_norm, y_valid_split,
optimizer='Adam'
)
Epoch 1/200 - 88/88 - 30s 277ms/step - loss: 2.5177 - accuracy: 0.1810 - val_loss: 4.0749 - val_accuracy: 0.0970
Epoch 2/200 - 88/88 - 17s 198ms/step - loss: 2.0827 - accuracy: 0.2177 - val_loss: 4.1615 - val_accuracy: 0.0972
Epoch 3/200 - 88/88 - 17s 197ms/step - loss: 1.9698 - accuracy: 0.2572 - val_loss: 3.6124 - val_accuracy: 0.1020
Epoch 4/200 - 88/88 - 17s 197ms/step - loss: 1.8183 - accuracy: 0.3125 - val_loss: 2.2030 - val_accuracy: 0.1754
Epoch 5/200 - 88/88 - 17s 197ms/step - loss: 1.8037 - accuracy: 0.3193 - val_loss: 2.5208 - val_accuracy: 0.1690
Epoch 6/200 - 88/88 - 17s 197ms/step - loss: 1.7398 - accuracy: 0.3449 - val_loss: 2.4168 - val_accuracy: 0.2122
Epoch 7/200 - 88/88 - 17s 197ms/step - loss: 1.6422 - accuracy: 0.3760 - val_loss: 2.7872 - val_accuracy: 0.1698
Epoch 8/200 - 88/88 - 17s 197ms/step - loss: 1.6001 - accuracy: 0.3993 - val_loss: 1.7578 - val_accuracy: 0.3130
Epoch 9/200 - 88/88 - 17s 198ms/step - loss: 1.4546 - accuracy: 0.4587 - val_loss: 1.6970 - val_accuracy: 0.4102
Epoch 10/200 - 88/88 - 17s 197ms/step - loss: 1.3532 - accuracy: 0.5041 - val_loss: 1.7961 - val_accuracy: 0.4174
Epoch 11/200 - 88/88 - 17s 198ms/step - loss: 1.3523 - accuracy: 0.5077 - val_loss: 2.1002 - val_accuracy: 0.3768
Epoch 12/200 - 88/88 - 17s 197ms/step - loss: 1.2390 - accuracy: 0.5539 - val_loss: 1.2965 - val_accuracy: 0.5196
Epoch 13/200 - 88/88 - 17s 198ms/step - loss: 1.1685 - accuracy: 0.5803 - val_loss: 1.5129 - val_accuracy: 0.4734
Epoch 14/200 - 88/88 - 17s 197ms/step - loss: 1.1004 - accuracy: 0.6074 - val_loss: 1.0602 - val_accuracy: 0.6156
Epoch 15/200 - 88/88 - 17s 197ms/step - loss: 1.0231 - accuracy: 0.6368 - val_loss: 0.9642 - val_accuracy: 0.6564
Epoch 16/200 - 88/88 - 17s 197ms/step - loss: 0.9615 - accuracy: 0.6593 - val_loss: 1.2262 - val_accuracy: 0.5866
Epoch 17/200 - 88/88 - 17s 197ms/step - loss: 0.9288 - accuracy: 0.6755 - val_loss: 1.0131 - val_accuracy: 0.6404
Epoch 18/200 - 88/88 - 17s 197ms/step - loss: 0.8963 - accuracy: 0.6852 - val_loss: 0.8278 - val_accuracy: 0.7042
Epoch 19/200 - 88/88 - 17s 198ms/step - loss: 0.8235 - accuracy: 0.7115 - val_loss: 0.7651 - val_accuracy: 0.7248
Epoch 20/200 - 88/88 - 17s 197ms/step - loss: 0.7762 - accuracy: 0.7318 - val_loss: 0.7090 - val_accuracy: 0.7462
Epoch 21/200 - 88/88 - 17s 197ms/step - loss: 0.7434 - accuracy: 0.7421 - val_loss: 0.8624 - val_accuracy: 0.7078
Epoch 22/200 - 88/88 - 17s 197ms/step - loss: 0.6984 - accuracy: 0.7589 - val_loss: 0.7874 - val_accuracy: 0.7310
Epoch 23/200 - 88/88 - 17s 197ms/step - loss: 0.6580 - accuracy: 0.7745 - val_loss: 0.7733 - val_accuracy: 0.7382
Epoch 24/200 - 88/88 - 17s 197ms/step - loss: 0.6280 - accuracy: 0.7866 - val_loss: 0.6869 - val_accuracy: 0.7678
Epoch 25/200 - 88/88 - 17s 197ms/step - loss: 0.6022 - accuracy: 0.7944 - val_loss: 0.6731 - val_accuracy: 0.7658
Epoch 26/200 - 88/88 - 17s 197ms/step - loss: 0.5728 - accuracy: 0.8053 - val_loss: 0.7177 - val_accuracy: 0.7718
Epoch 27/200 - 88/88 - 17s 197ms/step - loss: 0.5451 - accuracy: 0.8140 - val_loss: 0.6674 - val_accuracy: 0.7880
Epoch 28/200 - 88/88 - 17s 197ms/step - loss: 0.5157 - accuracy: 0.8254 - val_loss: 0.6367 - val_accuracy: 0.7928
Epoch 29/200 - 88/88 - 17s 197ms/step - loss: 0.4910 - accuracy: 0.8340 - val_loss: 0.5666 - val_accuracy: 0.8202
Epoch 30/200 - 88/88 - 17s 197ms/step - loss: 0.4745 - accuracy: 0.8391 - val_loss: 0.6034 - val_accuracy: 0.8210
Epoch 31/200 - 88/88 - 17s 198ms/step - loss: 0.4454 - accuracy: 0.8494 - val_loss: 0.5622 - val_accuracy: 0.8212
Epoch 32/200 - 88/88 - 17s 198ms/step - loss: 0.4245 - accuracy: 0.8571 - val_loss: 0.5600 - val_accuracy: 0.8164
Epoch 33/200 - 88/88 - 17s 197ms/step - loss: 0.3954 - accuracy: 0.8662 - val_loss: 0.5317 - val_accuracy: 0.8284
Epoch 34/200 - 88/88 - 17s 197ms/step - loss: 0.3768 - accuracy: 0.8731 - val_loss: 0.4934 - val_accuracy: 0.8444
Epoch 35/200 - 88/88 - 17s 197ms/step - loss: 0.3543 - accuracy: 0.8801 - val_loss: 0.6441 - val_accuracy: 0.8106
Epoch 36/200 - 88/88 - 17s 198ms/step - loss: 0.3505 - accuracy: 0.8816 - val_loss: 0.5356 - val_accuracy: 0.8350
Epoch 37/200 - 88/88 - 17s 197ms/step - loss: 0.3162 - accuracy: 0.8937 - val_loss: 0.5075 - val_accuracy: 0.8378
Epoch 38/200 - 88/88 - 17s 197ms/step - loss: 0.3190 - accuracy: 0.8929 - val_loss: 0.5325 - val_accuracy: 0.8418
Epoch 39/200 - 88/88 - 17s 197ms/step - loss: 0.2939 - accuracy: 0.9008 - val_loss: 0.4898 - val_accuracy: 0.8476
Epoch 40/200 - 88/88 - 17s 198ms/step - loss: 0.2727 - accuracy: 0.9083 - val_loss: 0.5242 - val_accuracy: 0.8434
Epoch 41/200 - 88/88 - 17s 197ms/step - loss: 0.2704 - accuracy: 0.9083 - val_loss: 0.5462 - val_accuracy: 0.8370
Epoch 42/200 - 88/88 - 17s 197ms/step - loss: 0.2503 - accuracy: 0.9150 - val_loss: 0.6063 - val_accuracy: 0.8276
Epoch 43/200 - 88/88 - 17s 197ms/step - loss: 0.2467 - accuracy: 0.9158 - val_loss: 0.5516 - val_accuracy: 0.8328
Epoch 44/200 - 88/88 - 17s 197ms/step - loss: 0.2317 - accuracy: 0.9224 - val_loss: 0.5447 - val_accuracy: 0.8420
Epoch 45/200 - 88/88 - 17s 197ms/step - loss: 0.2251 - accuracy: 0.9243 - val_loss: 0.5007 - val_accuracy: 0.8524
Epoch 46/200 - 88/88 - 17s 198ms/step - loss: 0.2068 - accuracy: 0.9302 - val_loss: 0.5369 - val_accuracy: 0.8508
Epoch 47/200 - 88/88 - 17s 197ms/step - loss: 0.2025 - accuracy: 0.9324 - val_loss: 0.5551 - val_accuracy: 0.8516
Epoch 48/200 - 88/88 - 17s 197ms/step - loss: 0.1901 - accuracy: 0.9366 - val_loss: 0.5283 - val_accuracy: 0.8564
Epoch 49/200 - 88/88 - 17s 197ms/step - loss: 0.1781 - accuracy: 0.9397 - val_loss: 0.5940 - val_accuracy: 0.8452
Epoch 50/200 - 88/88 - 17s 197ms/step - loss: 0.1711 - accuracy: 0.9425 - val_loss: 0.5222 - val_accuracy: 0.8548
Epoch 51/200 - 88/88 - 17s 197ms/step - loss: 0.1655 - accuracy: 0.9445 - val_loss: 0.6251 - val_accuracy: 0.8406
Epoch 52/200 - 88/88 - 17s 197ms/step - loss: 0.1609 - accuracy: 0.9456 - val_loss: 0.5089 - val_accuracy: 0.8590
Epoch 53/200 - 88/88 - 17s 197ms/step - loss: 0.1473 - accuracy: 0.9505 - val_loss: 0.5379 - val_accuracy: 0.8584
Epoch 54/200 - 88/88 - 17s 198ms/step - loss: 0.1410 - accuracy: 0.9538 - val_loss: 0.5707 - val_accuracy: 0.8520
Epoch 55/200 - 88/88 - 17s 197ms/step - loss: 0.1415 - accuracy: 0.9527 - val_loss: 0.5436 - val_accuracy: 0.8608
Epoch 56/200 - 88/88 - 17s 198ms/step - loss: 0.1474 - accuracy: 0.9506 - val_loss: 0.5639 - val_accuracy: 0.8566
Epoch 57/200 - 88/88 - 17s 197ms/step - loss: 0.1301 - accuracy: 0.9554 - val_loss: 0.5676 - val_accuracy: 0.8530
Epoch 58/200 - 88/88 - 17s 197ms/step - loss: 0.1315 - accuracy: 0.9558 - val_loss: 0.6419 - val_accuracy: 0.8352
Epoch 59/200 - 88/88 - 17s 198ms/step - loss: 0.1383 - accuracy: 0.9548 - val_loss: 0.6005 - val_accuracy: 0.8502
Epoch 60/200 - 88/88 - 17s 197ms/step - loss: 0.1201 - accuracy: 0.9606 - val_loss: 0.5509 - val_accuracy: 0.8618
Epoch 61/200 - 88/88 - 17s 197ms/step - loss: 0.1103 - accuracy: 0.9640 - val_loss: 0.5456 - val_accuracy: 0.8634
Epoch 62/200 - 88/88 - 17s 198ms/step - loss: 0.1127 - accuracy: 0.9624 - val_loss: 0.6269 - val_accuracy: 0.8488
Epoch 63/200 - 88/88 - 17s 197ms/step - loss: 0.1038 - accuracy: 0.9659 - val_loss: 0.6173 - val_accuracy: 0.8526
Epoch 64/200 - 88/88 - 17s 198ms/step - loss: 0.0996 - accuracy: 0.9668 - val_loss: 0.5549 - val_accuracy: 0.8646
Epoch 65/200 - 88/88 - 17s 198ms/step - loss: 0.0983 - accuracy: 0.9670 - val_loss: 0.6213 - val_accuracy: 0.8590
Epoch 66/200 - 88/88 - 17s 197ms/step - loss: 0.0973 - accuracy: 0.9674 - val_loss: 0.5711 - val_accuracy: 0.8620
Epoch 67/200 - 88/88 - 17s 197ms/step - loss: 0.0899 - accuracy: 0.9712 - val_loss: 0.6010 - val_accuracy: 0.8608
Epoch 68/200 - 88/88 - 17s 197ms/step - loss: 0.0928 - accuracy: 0.9689 - val_loss: 0.6553 - val_accuracy: 0.8482
Epoch 69/200 - 88/88 - 17s 197ms/step - loss: 0.0927 - accuracy: 0.9690 - val_loss: 0.5956 - val_accuracy: 0.8632
Epoch 70/200 - 88/88 - 17s 197ms/step - loss: 0.0871 - accuracy: 0.9717 - val_loss: 0.7164 - val_accuracy: 0.8398
Epoch 71/200 - 88/88 - 17s 197ms/step - loss: 0.0918 - accuracy: 0.9690 - val_loss: 0.5904 - val_accuracy: 0.8554
Epoch 72/200 - 88/88 - 17s 197ms/step - loss: 0.0798 - accuracy: 0.9732 - val_loss: 3.3879 - val_accuracy: 0.8402
Epoch 73/200 - 88/88 - 17s 197ms/step - loss: 0.0963 - accuracy: 0.9694 - val_loss: 0.6681 - val_accuracy: 0.8518
Epoch 74/200 - 88/88 - 17s 197ms/step - loss: 0.0776 - accuracy: 0.9746 - val_loss: 0.6134 - val_accuracy: 0.8588
Finished model training in 1322s
Model performance with training set
1407/1407 - 16s 11ms/step - loss: 0.0217 - accuracy: 0.9930
Evaluating model performance with validation set
157/157 - 2s 11ms/step - loss: 0.6134 - accuracy: 0.8588
plot_history(history_16)
pred16 = model_16.predict(x_test_norm)
pred16 = np.argmax(pred16, axis=1)
print_validation_report(y_test, pred16)
Classification Report
precision recall f1-score support
0 0.88 0.88 0.88 1000
1 0.96 0.91 0.93 1000
2 0.86 0.79 0.82 1000
3 0.66 0.80 0.72 1000
4 0.89 0.80 0.85 1000
5 0.76 0.80 0.78 1000
6 0.92 0.88 0.90 1000
7 0.90 0.90 0.90 1000
8 0.92 0.93 0.92 1000
9 0.90 0.92 0.91 1000
accuracy 0.86 10000
macro avg 0.87 0.86 0.86 10000
weighted avg 0.87 0.86 0.86 10000
Accuracy Score: 0.8597
Root Mean Square Error: 1.4781745499094483
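For context, the RMSE reported by the helper treats the class ids as plain numbers, sqrt(mean((true_id - pred_id)^2)), so it mostly reflects how far apart the confused class indices happen to be rather than model quality; it reduces to:
# RMSE over the raw class indices (MSE is sklearn's mean_squared_error, imported above)
rmse = np.sqrt(MSE(y_test.ravel(), pred16))
print(rmse)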
plot_confusion_matrix_labeled( y_test, pred16)
Experiment 17
from tensorflow.keras.preprocessing.image import ImageDataGenerator
y_train_aug = tf.keras.utils.to_categorical(y_train_split, num_classes =10)
y_valid_aug = tf.keras.utils.to_categorical(y_valid_split, num_classes= 10)
x_train_aug = x_train_split.copy()
x_valid_aug = x_valid_split.copy()
x_test_aug = x_test.copy()
trdata = ImageDataGenerator(rescale=1.0/255.,
                            featurewise_center=True,
                            featurewise_std_normalization=True,
                            zoom_range=0.2,
                            )
# Compute the featurewise mean/std on the training set only; calling fit()
# again on the validation or test images would overwrite these statistics.
trdata.fit(x_train_aug)
# shuffle=False keeps the preview below deterministic, at the cost of unshuffled training batches.
traindata = trdata.flow(x_train_aug, y_train_aug, batch_size=512, shuffle=False)
#tsdata = ImageDataGenerator()
testdata = trdata.flow(x_valid_aug, y_valid_aug, batch_size=512)
trdata.flow(x_test_aug)
<keras.preprocessing.image.NumpyArrayIterator at 0x7f82749dd110>
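Conceptually, trdata.fit() estimates per-channel statistics over the whole training set and flow() then standardizes every batch with them. A rough standalone equivalent of those statistics (illustrative only; the real generator also interleaves its rescale and zoom steps when batches are drawn):
# Per-channel mean/std over the entire training set, as trdata.fit() estimates them.
channel_mean = x_train_aug.mean(axis=(0, 1, 2))
channel_std = x_train_aug.std(axis=(0, 1, 2))

def standardize(batch, eps=1e-6):
    # Roughly what the generator applies to each batch it yields.
    return (batch - channel_mean) / (channel_std + eps)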
# Preview the first four raw training images and the one-hot label matrix.
for i in range(0, 4):
    image = x_train_aug[i]
    plt.imshow(image)
    plt.show()
print(y_train_aug)  # one-hot labels; `y` from the generator isn't defined until the next cell
[[0. 0. 0. ... 0. 0. 0.] [0. 1. 0. ... 0. 0. 0.] [1. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]]
x,y = next(traindata)
for i in range(0,4):
image = x[i]
plt.imshow(image)
plt.show()
print(y)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). (warning emitted once per image shown)
[[0. 0. 0. ... 0. 0. 0.] [0. 1. 0. ... 0. 0. 0.] [1. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.]]
x_train_norm.shape[1:]
(32, 32, 3)
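The convolutional models take the (32, 32, 3) images directly, while the dense experiments further below flatten the same arrays into 3,072-dimensional vectors; the two views are plain reshapes of one another:
flat_view = x_train_norm.reshape(len(x_train_norm), -1)       # (45000, 3072)
assert flat_view.shape[1] == np.prod(x_train_norm.shape[1:])  # 32*32*3 == 3072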
model_17 = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Flatten(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=10, activation=tf.nn.softmax)
])
model_17.summary()
Model: "sequential_37" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_84 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_60 (MaxPooling (None, 16, 16, 128) 0 _________________________________________________________________ dropout_89 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_147 (Bat (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_85 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_86 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_87 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ max_pooling2d_61 (MaxPooling (None, 8, 8, 256) 0 _________________________________________________________________ dropout_90 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_148 (Bat (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_88 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ conv2d_89 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_90 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ max_pooling2d_62 (MaxPooling (None, 4, 4, 512) 0 _________________________________________________________________ dropout_91 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_149 (Bat (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_91 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ conv2d_92 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ conv2d_93 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ max_pooling2d_63 (MaxPooling (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_92 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_150 (Bat (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_28 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_129 (Dense) (None, 1024) 4195328 _________________________________________________________________ dropout_93 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_151 (Bat (None, 1024) 4096 _________________________________________________________________ dense_130 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_94 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_152 (Bat (None, 1024) 4096 _________________________________________________________________ dense_131 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_95 (Dropout) (None, 1024) 0 
_________________________________________________________________ batch_normalization_153 (Bat (None, 1024) 4096 _________________________________________________________________ dense_132 (Dense) (None, 10) 10250 ================================================================= Total params: 37,299,466 Trainable params: 37,289,482 Non-trainable params: 9,984 _________________________________________________________________
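As a sanity check on these counts, a Conv2D layer holds kernel_h * kernel_w * in_channels * filters weights plus one bias per filter; the first few rows of the table can be reproduced by hand:
def conv2d_params(kernel, in_ch, filters):
    # weights per filter times number of filters, plus one bias per filter
    kh, kw = kernel
    return kh * kw * in_ch * filters + filters

print(conv2d_params((3, 3), 3, 128))    # 3584    -> conv2d_84
print(conv2d_params((3, 3), 128, 256))  # 295168  -> conv2d_85
print(conv2d_params((3, 3), 256, 256))  # 590080  -> conv2d_86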
model_17.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
history_17 = model_17.fit(traindata, epochs=200,
validation_data=testdata,
#verbose=0,
callbacks=[
EarlyStopping(monitor='val_accuracy', patience=10),
ModelCheckpoint('/content/gdrive/My Drive/Colab Notebooks/models/model_{val_accuracy:.4f}.h5',
save_best_only=True,
save_weights_only=False,
monitor='val_accuracy')]
)
Epoch 1/200 88/88 [==============================] - 30s 320ms/step - loss: 2.3030 - accuracy: 0.1990 - val_loss: 3.5537 - val_accuracy: 0.1124
[epochs 2–63 truncated: val_accuracy rose steadily and peaked at 0.8758 in epoch 54; after 10 epochs without improvement EarlyStopping ended the run]
Epoch 64/200 88/88 [==============================] - 25s 285ms/step - loss: 0.1043 - accuracy: 0.9653 - val_loss: 0.5475 - val_accuracy: 0.8622
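Training stopped at epoch 64 because val_accuracy peaked at 0.8758 in epoch 54 and EarlyStopping's patience of 10 epochs then ran out. As configured, the model keeps the weights of the last epoch rather than the best one; EarlyStopping can restore the best weights instead (a sketch of that variant):
early_stop = EarlyStopping(monitor='val_accuracy',
                           patience=10,
                           restore_best_weights=True)  # roll back to the best-scoring epoch
(ModelCheckpoint with save_best_only=True already keeps the best model on disk, which is why the saved checkpoints remain useful either way.)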
plot_history(history_17)
# Model.predict accepts generators directly; Model.predict_generator is deprecated in TF 2.x.
pred17 = model_17.predict(trdata.flow(x_test_aug, shuffle=False))
pred17 = np.argmax(pred17, axis=1)
y_test
array([[3],
[8],
[8],
...,
[5],
[1],
[7]], dtype=uint8)
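Note that y_test stores the labels as a (10000, 1) column while the argmax predictions are flat; scikit-learn's metrics accept both shapes, but flattening first keeps direct comparisons unambiguous:
y_true = y_test.ravel()           # (10000,) integer labels
print((y_true == pred17).mean())  # fraction correct, i.e. the accuracy score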
print_validation_report(y_test, pred17)
Classification Report
precision recall f1-score support
0 0.90 0.87 0.88 1000
1 0.93 0.94 0.94 1000
2 0.85 0.79 0.82 1000
3 0.76 0.66 0.71 1000
4 0.88 0.83 0.85 1000
5 0.76 0.79 0.78 1000
6 0.85 0.92 0.88 1000
7 0.78 0.95 0.86 1000
8 0.92 0.91 0.92 1000
9 0.94 0.88 0.91 1000
accuracy 0.86 10000
macro avg 0.86 0.86 0.85 10000
weighted avg 0.86 0.86 0.85 10000
Accuracy Score: 0.8554
Root Mean Square Error: 1.5137370973851436
plot_confusion_matrix_labeled( y_test, pred17)
Experiment 18: DNN with 5 dense layers
from sklearn.decomposition import PCA
x_train_flat_aug = np.reshape(x_train_norm, (45000, 3072))
x_valid_flat_aug = np.reshape(x_valid_norm, (5000, 3072))
x_test_flat = np.reshape(x_test_norm, (10000, 3072))
# pca = PCA(n_components=0.95)
# train_images_red = pca.fit_transform(x_train_flat)
# valid_images_red = pca.transform(x_valid_flat)
# train_images_red.shape[1]
(Output when the PCA cells were run: 216 components retained.)
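The commented-out cells above sketch a PCA reduction keeping 95% of the variance; a runnable version under the same assumptions (the projection must be fitted on training data only):
from sklearn.decomposition import PCA

# A fractional n_components keeps enough components to explain that share of
# the variance; on this data it retained 216 of the 3,072 features.
pca = PCA(n_components=0.95)
train_images_red = pca.fit_transform(x_train_flat_aug)
valid_images_red = pca.transform(x_valid_flat_aug)
print(train_images_red.shape[1])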
model_18 = Sequential([
    #Flatten(input_shape=(32, 32, 3)),
    #BatchNormalization(),
    Dense(units=512, input_shape=[3072], activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(name="output_layer", units=10, activation=tf.nn.softmax)
])
model_18.summary()
Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_13 (Dense) (None, 512) 1573376 _________________________________________________________________ batch_normalization_13 (Batc (None, 512) 2048 _________________________________________________________________ dense_14 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_14 (Batc (None, 512) 2048 _________________________________________________________________ dense_15 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_15 (Batc (None, 512) 2048 _________________________________________________________________ dense_16 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_16 (Batc (None, 512) 2048 _________________________________________________________________ output_layer (Dense) (None, 10) 5130 ================================================================= Total params: 2,374,666 Trainable params: 2,370,570 Non-trainable params: 4,096 _________________________________________________________________
# (compile_train_model below appears to compile the model itself with the
# optimizer it is given, which makes this explicit compile redundant but harmless.)
model_18.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
history_18 , model_18 = compile_train_model(model_18, x_train_flat_aug, y_train_split, x_valid_flat_aug, y_valid_split, epochs=200, optimizer = 'Adam', batch_size = 256 )
Epoch 1/200 176/176 [==============================] - 2s 6ms/step - loss: 3.8788 - accuracy: 0.3906 - val_loss: 3.7607 - val_accuracy: 0.3092
[epochs 2–33 truncated: training accuracy reached 0.74 while val_accuracy oscillated, peaking at 0.4844 in epoch 24; EarlyStopping then ended the run]
Epoch 34/200 176/176 [==============================] - 1s 5ms/step - loss: 1.1427 - accuracy: 0.7432 - val_loss: 2.2564 - val_accuracy: 0.4574
Finished model training in 34s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 1.4620 - accuracy: 0.6342
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 2.2564 - accuracy: 0.4574
# Refit the dense network on the augmented image generator from Experiment 17.
# Caution: traindata yields (batch, 32, 32, 3) images while model_18 was built
# for flat 3,072-feature inputs; a Flatten input layer would make this pairing explicit.
history_18 = model_18.fit(traindata, epochs=200,
validation_data=testdata,
#verbose=0,
callbacks=[
EarlyStopping(monitor='val_accuracy', patience=10),
ModelCheckpoint('/content/gdrive/My Drive/Colab Notebooks/models/model_{val_accuracy:.4f}.h5',
save_best_only=True,
save_weights_only=False,
monitor='val_accuracy')]
)
Epoch 1/200 88/88 [==============================] - 20s 196ms/step - loss: 1.7724 - accuracy: 0.3884 - val_loss: 1.8890 - val_accuracy: 0.3730
[epochs 2–26 truncated: training accuracy reached 0.91 while val_accuracy stalled in the low 0.50s, peaking at 0.5416 in epoch 17; EarlyStopping then ended the run]
Epoch 27/200 88/88 [==============================] - 17s 188ms/step - loss: 0.2444 - accuracy: 0.9153 - val_loss: 2.0796 - val_accuracy: 0.5312
plot_history(history_18)
pred18 = model_18.predict(x_test_flat)
pred18 = np.argmax(pred18, axis=1)
print_validation_report(y_test, pred18)
Classification Report
precision recall f1-score support
0 0.49 0.53 0.51 1000
1 0.51 0.72 0.60 1000
2 0.34 0.35 0.34 1000
3 0.36 0.27 0.31 1000
4 0.44 0.37 0.40 1000
5 0.45 0.36 0.40 1000
6 0.48 0.61 0.54 1000
7 0.53 0.47 0.50 1000
8 0.67 0.43 0.52 1000
9 0.44 0.58 0.50 1000
accuracy 0.47 10000
macro avg 0.47 0.47 0.46 10000
weighted avg 0.47 0.47 0.46 10000
Accuracy Score: 0.4686
Root Mean Square Error: 3.278719262151
plot_confusion_matrix_labeled( y_test, pred18)
Experiment 18A: DNN with 5 dense layers and dropout
x_train_flat_aug = np.reshape(x_train_norm, (45000, 3072))
x_valid_flat_aug = np.reshape(x_valid_norm, (5000, 3072))
x_test_flat = np.reshape(x_test_norm, (10000, 3072))
model_18a = Sequential([
    #Flatten(input_shape=(32, 32, 3)),
    #BatchNormalization(),
    Dense(units=512, input_shape=[3072], activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dropout(0.3),
    Dense(units=512, activation=tf.nn.relu,
          kernel_regularizer=tf.keras.regularizers.L2(0.001)),
    BatchNormalization(),
    Dense(name="output_layer", units=10, activation=tf.nn.softmax)
])
model_18a.summary()
Model: "sequential_7" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= dense_29 (Dense) (None, 512) 1573376 _________________________________________________________________ batch_normalization_29 (Batc (None, 512) 2048 _________________________________________________________________ dropout_12 (Dropout) (None, 512) 0 _________________________________________________________________ dense_30 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_30 (Batc (None, 512) 2048 _________________________________________________________________ dropout_13 (Dropout) (None, 512) 0 _________________________________________________________________ dense_31 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_31 (Batc (None, 512) 2048 _________________________________________________________________ dropout_14 (Dropout) (None, 512) 0 _________________________________________________________________ dense_32 (Dense) (None, 512) 262656 _________________________________________________________________ batch_normalization_32 (Batc (None, 512) 2048 _________________________________________________________________ output_layer (Dense) (None, 10) 5130 ================================================================= Total params: 2,374,666 Trainable params: 2,370,570 Non-trainable params: 4,096 _________________________________________________________________
history_18a , model_18a = compile_train_model(model_18a, x_train_flat_aug, y_train_split, x_valid_flat_aug, y_valid_split, epochs=200, optimizer = 'Adam', batch_size = 256 )
Epoch 1/200 176/176 [==============================] - 2s 6ms/step - loss: 1.6474 - accuracy: 0.4740 - val_loss: 2.0688 - val_accuracy: 0.3280
[epochs 2–19 truncated: training loss stayed near 1.66 and val_accuracy peaked at 0.4490 in epoch 10; EarlyStopping ended the run after 10 epochs without improvement]
Epoch 20/200 176/176 [==============================] - 1s 5ms/step - loss: 1.6591 - accuracy: 0.4754 - val_loss: 1.8077 - val_accuracy: 0.4348
Finished model training in 21s
Model performance with training set
1407/1407 [==============================] - 3s 2ms/step - loss: 1.7069 - accuracy: 0.4693
Evaluating model performance with validation set
157/157 [==============================] - 0s 2ms/step - loss: 1.8077 - accuracy: 0.4348
plot_history(history_18a)
pred18a = model_18a.predict(x_test_flat)
pred18a = np.argmax(pred18a, axis=1)
print_validation_report(y_test, pred18a)
Classification Report
precision recall f1-score support
0 0.63 0.32 0.42 1000
1 0.58 0.60 0.59 1000
2 0.28 0.47 0.35 1000
3 0.28 0.27 0.28 1000
4 0.43 0.36 0.39 1000
5 0.35 0.45 0.39 1000
6 0.43 0.65 0.52 1000
7 0.53 0.49 0.51 1000
8 0.60 0.56 0.58 1000
9 0.71 0.26 0.38 1000
accuracy 0.44 10000
macro avg 0.48 0.44 0.44 10000
weighted avg 0.48 0.44 0.44 10000
Accuracy Score: 0.4423
Root Mean Square Error: 3.0306764921383476
plot_confusion_matrix_labeled( y_test, pred18a)
Experiment 20
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import cv2
y_train_aug = tf.keras.utils.to_categorical(y_train_split, num_classes =10)
y_valid_aug = tf.keras.utils.to_categorical(y_valid_split, num_classes= 10)
y_test_aug = tf.keras.utils.to_categorical(y_test, num_classes= 10)
x_train_aug = x_train_split.copy()
x_valid_aug = x_valid_split.copy()
x_test_aug = x_test.copy()
trdata = ImageDataGenerator(rescale=1.0/255.,
                            featurewise_center=True,
                            featurewise_std_normalization=True,
                            rotation_range=20,
                            width_shift_range=0.2,
                            height_shift_range=0.2,
                            horizontal_flip=True
                            )
# As in Experiment 17, fit the featurewise statistics on the training images only.
trdata.fit(x_train_aug)
traindata = trdata.flow(x_train_aug, y_train_aug, batch_size=512)
testdata = trdata.flow(x_valid_aug, y_valid_aug, batch_size=512)
#testdata_1 = trdata.flow(x_test_aug, y_test_aug, batch_size=512)
# (manual /255 scaling is unnecessary here; the generator's rescale handles it)
x,y = next(traindata)
for i in range(0,4):
image = x[i]
#print(x[i])
plt.imshow(image)
plt.show()
print(y)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). (warning emitted once per image shown)
[[0. 0. 0. ... 1. 0. 0.] [0. 1. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] ... [0. 0. 0. ... 0. 0. 0.] [0. 0. 0. ... 0. 0. 0.] [0. 1. 0. ... 0. 0. 0.]]
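Each row of y is a one-hot vector; the integer class ids, and the class names defined earlier, can be recovered with argmax (a small illustrative check):
labels = np.argmax(y, axis=1)                # one-hot rows back to integer labels
print([class_names[j] for j in labels[:4]])  # names for the four images shown above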
model_20 = Sequential([
    Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
           input_shape=x_train_norm.shape[1:], padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
    MaxPool2D((2, 2), strides=2),
    Dropout(0.5),
    BatchNormalization(),
    Flatten(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=1024, activation=tf.nn.relu),
    Dropout(0.3),
    BatchNormalization(),
    Dense(units=10, activation=tf.nn.softmax)
])
model_20.summary()
Model: "sequential_38" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_94 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_64 (MaxPooling (None, 16, 16, 128) 0 _________________________________________________________________ dropout_96 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_154 (Bat (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_95 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_96 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_97 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ max_pooling2d_65 (MaxPooling (None, 8, 8, 256) 0 _________________________________________________________________ dropout_97 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_155 (Bat (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_98 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ conv2d_99 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_100 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ max_pooling2d_66 (MaxPooling (None, 4, 4, 512) 0 _________________________________________________________________ dropout_98 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_156 (Bat (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_101 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ conv2d_102 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ conv2d_103 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ max_pooling2d_67 (MaxPooling (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_99 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_157 (Bat (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten_29 (Flatten) (None, 4096) 0 _________________________________________________________________ dense_133 (Dense) (None, 1024) 4195328 _________________________________________________________________ dropout_100 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_158 (Bat (None, 1024) 4096 _________________________________________________________________ dense_134 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_101 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_159 (Bat (None, 1024) 4096 _________________________________________________________________ dense_135 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_102 (Dropout) (None, 
1024) 0 _________________________________________________________________ batch_normalization_160 (Bat (None, 1024) 4096 _________________________________________________________________ dense_136 (Dense) (None, 10) 10250 ================================================================= Total params: 37,299,466 Trainable params: 37,289,482 Non-trainable params: 9,984 _________________________________________________________________
model_20.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
history_20 = model_20.fit(traindata, epochs=200,
validation_data=testdata,
#verbose=0,
callbacks=[
EarlyStopping(monitor='val_accuracy', patience=10),
ModelCheckpoint('/content/gdrive/My Drive/Colab Notebooks/models/model_{val_accuracy:.4f}.h5',
save_best_only=True,
save_weights_only=False,
monitor='val_accuracy')]
)
Epoch 1/200 88/88 [==============================] - 31s 331ms/step - loss: 2.3150 - accuracy: 0.1884 - val_loss: 4.4897 - val_accuracy: 0.1006
[epochs 2–96 truncated: val_accuracy improved steadily under the heavier augmentation, reaching its best value of 0.8894 at epoch 87 (matched again at epoch 96); EarlyStopping ended the run after 10 epochs without further improvement]
Epoch 97/200 88/88 [==============================] - 29s 328ms/step - loss: 0.1864 - accuracy: 0.9370 - val_loss: 0.4112 - val_accuracy: 0.8760
plot_history(history_20)
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-41-1ca3adde2102> in <module>() ----> 1 model_20.load_weights(checkpoint_filepath) NameError: name 'model_20' is not defined
pred20 = model_20.predict(trdata.flow(x_test_aug, shuffle=False))
pred20 = np.argmax(pred20, axis=1)
--------------------------------------------------------------------------- NameError Traceback (most recent call last) <ipython-input-1-ce2ca4e701c0> in <module>() ----> 1 pred20 = model_20.predict(trdata.flow(trdata.flow(x_test_aug, shuffle=False))) 2 pred20 = np.argmax(pred20, axis=1) NameError: name 'model_20' is not defined
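Both tracebacks stem from `model_20` never being defined in this runtime session; the architecture has to be rebuilt (or the saved model reloaded) before the checkpointed weights can be restored. A minimal sketch, assuming a hypothetical `build_model_20()` that recreates the Experiment 20 architecture and the `checkpoint_filepath` used by its ModelCheckpoint callback:
model_20 = build_model_20()                 # hypothetical helper: recreate the Experiment 20 architecture
model_20.load_weights(checkpoint_filepath)  # restore the checkpointed weights into the rebuilt model
pred20 = model_20.predict(trdata.flow(x_test_aug, shuffle=False))
pred20 = np.argmax(pred20, axis=1)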
pred20
array([3, 7, 3, ..., 9, 2, 3])
print_validation_report(y_test, pred20)
Classification Report
precision recall f1-score support
0 0.10 0.08 0.09 1000
1 0.08 0.08 0.08 1000
2 0.11 0.09 0.10 1000
3 0.11 0.08 0.09 1000
4 0.10 0.14 0.12 1000
5 0.12 0.12 0.12 1000
6 0.09 0.13 0.11 1000
7 0.09 0.08 0.08 1000
8 0.10 0.11 0.11 1000
9 0.10 0.09 0.09 1000
accuracy 0.10 10000
macro avg 0.10 0.10 0.10 10000
weighted avg 0.10 0.10 0.10 10000
Accuracy Score: 0.0992
Root Mean Square Error: 4.012592678057418
An accuracy of roughly 0.10 on a balanced 10-class problem is chance level, which suggests these pred20 values are stale runtime state (or were produced with mismatched preprocessing) rather than the output of the intended model.
plot_confusion_matrix_labeled(y_test, pred20)
Experiment 21
from keras.preprocessing.image import ImageDataGenerator

# One-hot encode the labels and copy the image arrays for the augmentation pipeline
y_train_aug = tf.keras.utils.to_categorical(y_train_split, num_classes=10)
y_valid_aug = tf.keras.utils.to_categorical(y_valid_split, num_classes=10)
y_test_aug = tf.keras.utils.to_categorical(y_test, num_classes=10)
x_train_aug = x_train_split.copy()
x_valid_aug = x_valid_split.copy()
x_test_aug = x_test.copy()
trdata = ImageDataGenerator(rescale=1.0/255.,
                            featurewise_center=True,             # zero-center using dataset-wide channel means
                            featurewise_std_normalization=True,  # scale by the dataset-wide channel stds
                            rotation_range=20,
                            width_shift_range=0.2,
                            height_shift_range=0.2,
                            horizontal_flip=True)
# Fit the featurewise statistics on the training images only; refitting on the
# validation or test arrays would overwrite these statistics and leak information.
trdata.fit(x_train_aug)
traindata = trdata.flow(x_train_aug, y_train_aug, batch_size=64)
testdata = trdata.flow(x_valid_aug, y_valid_aug, batch_size=64)
#testdata_1 = trdata.flow(x_test_aug, y_test_aug, batch_size=512)
x, y = next(traindata)
# Preview a few augmented images; the loop variable is named img so it does not
# shadow the keras.preprocessing image module imported at the top of the notebook.
for i in range(4):
    img = x[i]
    plt.imshow(img)
    plt.show()
print(y)
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
[[0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
 ...
 [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
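The clipping warnings appear because featurewise standardization pushes pixel values outside the [0, 1] range that imshow expects. For display purposes only, each image can be rescaled back to [0, 1]; a small sketch (the helper name is ours):
def to_displayable(img):
    # min-max rescale a standardized image into [0, 1] for plt.imshow
    return (img - img.min()) / (img.max() - img.min() + 1e-8)

plt.imshow(to_displayable(x[0]))
plt.show()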
model_21 = Sequential(
    [
        Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu,
               input_shape=x_train_aug.shape[1:],  # (32, 32, 3); use the augmented training set, not x_train_norm
               padding='same'),
        MaxPool2D((2, 2), strides=2),
        Dropout(0.5),
        BatchNormalization(),
        Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=256, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        MaxPool2D((2, 2), strides=2),
        Dropout(0.5),
        BatchNormalization(),
        Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=512, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        MaxPool2D((2, 2), strides=2),
        Dropout(0.5),
        BatchNormalization(),
        Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        Conv2D(filters=1024, kernel_size=(3, 3), strides=(1, 1), activation=tf.nn.relu, padding='same'),
        MaxPool2D((2, 2), strides=2),
        Dropout(0.5),
        BatchNormalization(),
        Flatten(),
        Dense(units=1024, activation=tf.nn.relu),
        Dropout(0.3),
        BatchNormalization(),
        Dense(units=1024, activation=tf.nn.relu),
        Dropout(0.3),
        BatchNormalization(),
        Dense(units=1024, activation=tf.nn.relu),
        Dropout(0.3),
        BatchNormalization(),
        Dense(units=10, activation=tf.nn.softmax)
    ])
model_21.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 16, 16, 128) 0 _________________________________________________________________ dropout (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_1 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_2 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_3 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 8, 8, 256) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 8, 8, 256) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 8, 8, 256) 1024 _________________________________________________________________ conv2d_4 (Conv2D) (None, 8, 8, 512) 1180160 _________________________________________________________________ conv2d_5 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ conv2d_6 (Conv2D) (None, 8, 8, 512) 2359808 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 4, 4, 512) 0 _________________________________________________________________ dropout_2 (Dropout) (None, 4, 4, 512) 0 _________________________________________________________________ batch_normalization_2 (Batch (None, 4, 4, 512) 2048 _________________________________________________________________ conv2d_7 (Conv2D) (None, 4, 4, 1024) 4719616 _________________________________________________________________ conv2d_8 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ conv2d_9 (Conv2D) (None, 4, 4, 1024) 9438208 _________________________________________________________________ max_pooling2d_3 (MaxPooling2 (None, 2, 2, 1024) 0 _________________________________________________________________ dropout_3 (Dropout) (None, 2, 2, 1024) 0 _________________________________________________________________ batch_normalization_3 (Batch (None, 2, 2, 1024) 4096 _________________________________________________________________ flatten (Flatten) (None, 4096) 0 _________________________________________________________________ dense (Dense) (None, 1024) 4195328 _________________________________________________________________ dropout_4 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_4 (Batch (None, 1024) 4096 _________________________________________________________________ dense_1 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_5 (Dropout) (None, 1024) 0 _________________________________________________________________ batch_normalization_5 (Batch (None, 1024) 4096 _________________________________________________________________ dense_2 (Dense) (None, 1024) 1049600 _________________________________________________________________ dropout_6 (Dropout) (None, 1024) 0 
_________________________________________________________________ batch_normalization_6 (Batch (None, 1024) 4096 _________________________________________________________________ dense_3 (Dense) (None, 10) 10250 ================================================================= Total params: 37,299,466 Trainable params: 37,289,482 Non-trainable params: 9,984 _________________________________________________________________
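A design note on the stacked blocks: three consecutive 3x3 convolutions cover the receptive field of a single 7x7 convolution at a lower weight cost per channel pair, 3 * (3 * 3) = 27 versus 7 * 7 = 49, while adding two extra non-linearities in between. That VGG-style trade-off is the rationale for the triple-conv blocks above.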
model_21.compile(optimizer='adam',
                 loss='categorical_crossentropy',
                 metrics=['accuracy'])
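Equivalently, the loss can be passed as an object, which makes options such as label smoothing explicit; a sketch (the smoothing value is illustrative only, not what was actually trained):
model_21.compile(optimizer='adam',
                 loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1),  # illustrative value
                 metrics=['accuracy'])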
history_21 = model_21.fit(traindata, epochs=200,
validation_data=testdata,
#verbose=0,
callbacks=[
EarlyStopping(monitor='val_accuracy', patience=10),
ModelCheckpoint('/content/gdrive/My Drive/Colab Notebooks/models/model_{val_accuracy:.4f}.h5',
save_best_only=True,
save_weights_only=False,
monitor='val_accuracy')]
)
Epoch 1/200 704/704 [==============================] - 57s 54ms/step - loss: 2.2049 - accuracy: 0.2124 - val_loss: 1.9612 - val_accuracy: 0.2656
Epoch 2/200 704/704 [==============================] - 39s 55ms/step - loss: 1.8801 - accuracy: 0.2912 - val_loss: 1.8121 - val_accuracy: 0.3510
...
Epoch 96/200 704/704 [==============================] - 37s 52ms/step - loss: 0.1940 - accuracy: 0.9339 - val_loss: 0.3786 - val_accuracy: 0.8854
Epoch 97/200 704/704 [==============================] - 37s 52ms/step - loss: 0.1837 - accuracy: 0.9390 - val_loss: 0.3748 - val_accuracy: 0.8844
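Training stopped at epoch 97 once val_accuracy had plateaued for 10 epochs. Note that EarlyStopping as configured keeps the weights from the final epoch, not the best one; the ModelCheckpoint files cover that, but the callback itself can also roll back. A sketch with the same TF 2.x API:
early_stop = EarlyStopping(monitor='val_accuracy', patience=10,
                           restore_best_weights=True)  # restore the best epoch's weights when training stops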
plot_history(history_21)
# Model.predict_generator is deprecated in TF 2.x; Model.predict accepts generators directly.
pred21 = model_21.predict(trdata.flow(x_test_aug, shuffle=False))
pred21 = np.argmax(pred21, axis=1)
print_validation_report(y_test, pred21)
Classification Report
precision recall f1-score support
0 0.94 0.82 0.88 1000
1 0.93 0.95 0.94 1000
2 0.83 0.87 0.85 1000
3 0.82 0.76 0.79 1000
4 0.86 0.87 0.87 1000
5 0.86 0.79 0.82 1000
6 0.89 0.94 0.92 1000
7 0.89 0.92 0.91 1000
8 0.88 0.95 0.91 1000
9 0.89 0.92 0.90 1000
accuracy 0.88 10000
macro avg 0.88 0.88 0.88 10000
weighted avg 0.88 0.88 0.88 10000
Accuracy Score: 0.8796
Root Mean Square Error: 1.5002999700059985
plot_confusion_matrix_labeled(y_test, pred21)
# Feature-map visualization, adapted from:
# https://machinelearningmastery.com/how-to-visualize-filters-and-feature-maps-in-convolutional-neural-networks/
# summarize feature-map shapes
for i in range(len(model_21.layers)):
    layer = model_21.layers[i]
    # summarize output shape
    print(i, layer.name, layer.output.shape)
0 conv2d (None, 32, 32, 128) 1 max_pooling2d (None, 16, 16, 128) 2 dropout (None, 16, 16, 128) 3 batch_normalization (None, 16, 16, 128) 4 conv2d_1 (None, 16, 16, 256) 5 conv2d_2 (None, 16, 16, 256) 6 conv2d_3 (None, 16, 16, 256) 7 max_pooling2d_1 (None, 8, 8, 256) 8 dropout_1 (None, 8, 8, 256) 9 batch_normalization_1 (None, 8, 8, 256) 10 conv2d_4 (None, 8, 8, 512) 11 conv2d_5 (None, 8, 8, 512) 12 conv2d_6 (None, 8, 8, 512) 13 max_pooling2d_2 (None, 4, 4, 512) 14 dropout_2 (None, 4, 4, 512) 15 batch_normalization_2 (None, 4, 4, 512) 16 conv2d_7 (None, 4, 4, 1024) 17 conv2d_8 (None, 4, 4, 1024) 18 conv2d_9 (None, 4, 4, 1024) 19 max_pooling2d_3 (None, 2, 2, 1024) 20 dropout_3 (None, 2, 2, 1024) 21 batch_normalization_3 (None, 2, 2, 1024) 22 flatten (None, 4096) 23 dense (None, 1024) 24 dropout_4 (None, 1024) 25 batch_normalization_4 (None, 1024) 26 dense_1 (None, 1024) 27 dropout_5 (None, 1024) 28 batch_normalization_5 (None, 1024) 29 dense_2 (None, 1024) 30 dropout_6 (None, 1024) 31 batch_normalization_6 (None, 1024) 32 dense_3 (None, 10)
# redefine the model to output the activations of the first convolutional layer
# (this cell reuses model_16 from an earlier experiment; the conv2d_11 layer names
# in the summary below come from that model)
outputs = model_16.layers[0].output  # the first Conv2D layer
model_f = tf.keras.Model(inputs=model_16.inputs, outputs=outputs)
model_f.summary()
Model: "model_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_11_input (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d_11 (Conv2D) (None, 32, 32, 128) 3584 ================================================================= Total params: 3,584 Trainable params: 3,584 Non-trainable params: 0 _________________________________________________________________
model_21.inputs
[<KerasTensor: shape=(None, 32, 32, 3) dtype=float32 (created by layer 'conv2d_input')>]
img = x_test[2004]
img_tensor = image.img_to_array(img)
print(img_tensor.shape)
img_tensor = np.expand_dims(img_tensor, axis=0)  # add the batch dimension: (1, 32, 32, 3)
plt.imshow(img)  # RGB image, so no colormap is applied
#plt.axis('off')
plt.show()
(32, 32, 3)
feature_maps = model_f.predict(img_tensor)  # predict will return the feature map of the
                                            # first convolutional layer for the given input image
# plot all 128 channels of the first conv layer in an 8 x 16 grid
rows = 8
columns = 16
ix = 1
plt.figure(figsize=(20, 14))
for _ in range(rows):
    for _ in range(columns):
        # specify the subplot and hide the axis ticks
        ax = plt.subplot(rows, columns, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # min-max scale each channel for display; note ix - 1 (the original 1 - ix
        # walked the channels in reverse via negative indexing)
        scaler = MinMaxScaler()
        channel_image = scaler.fit_transform(feature_maps[0, :, :, ix - 1])
        channel_image = channel_image * 255
        plt.imshow(channel_image, aspect='auto', cmap='viridis')
        ix += 1
# show the figure
plt.show()
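The same grid loop recurs for every layer inspected below, so it could be factored into a small helper; a sketch, under the assumption that every activation tensor passed in has shape (1, H, W, C) with C >= rows * columns:
def plot_feature_maps(feature_maps, rows, columns, figsize=(20, 14)):
    # plot the first rows*columns channels of a (1, H, W, C) activation tensor
    plt.figure(figsize=figsize)
    for ix in range(rows * columns):
        ax = plt.subplot(rows, columns, ix + 1)
        ax.set_xticks([])
        ax.set_yticks([])
        channel = feature_maps[0, :, :, ix]
        # per-channel min-max scaling for display; epsilon guards flat channels
        channel = (channel - channel.min()) / (channel.max() - channel.min() + 1e-8)
        ax.imshow(channel, cmap='viridis')
    plt.show()

plot_feature_maps(feature_maps, rows=8, columns=16)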
# redefine the model to output the activations right after the first max-pooling layer
outputs = model_16.layers[1].output
model_f = tf.keras.Model(inputs=model_16.inputs, outputs=outputs)
model_f.summary()
Model: "model_7" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_11_input (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d_11 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_11 (MaxPooling (None, 16, 16, 128) 0 ================================================================= Total params: 3,584 Trainable params: 3,584 Non-trainable params: 0 _________________________________________________________________
feature_maps = model_f.predict(img_tensor)
# plot all 128 channels of the pooled feature map in an 8 x 16 grid
rows = 8
columns = 16
ix = 1
plt.figure(figsize=(20, 14))
for _ in range(rows):
    for _ in range(columns):
        ax = plt.subplot(rows, columns, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # min-max scale each channel for display (ix - 1 fixes the reversed 1 - ix indexing)
        scaler = MinMaxScaler()
        channel_image = scaler.fit_transform(feature_maps[0, :, :, ix - 1])
        channel_image = channel_image * 255
        plt.imshow(channel_image, cmap='viridis')
        ix += 1
# show the figure
plt.show()
# redefine the model to output the last convolution of the second block (conv2d_14, layers[6])
outputs = model_16.layers[6].output
model_f = tf.keras.Model(inputs=model_16.inputs, outputs=outputs)
model_f.summary()
Model: "model_8" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_11_input (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d_11 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_11 (MaxPooling (None, 16, 16, 128) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_33 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_12 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_13 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_14 (Conv2D) (None, 16, 16, 256) 590080 ================================================================= Total params: 1,479,424 Trainable params: 1,479,168 Non-trainable params: 256 _________________________________________________________________
feature_maps = model_f.predict(img_tensor)
# plot all 256 channels in a 16 x 16 grid
rows = 16
columns = 16
ix = 1
plt.figure(figsize=(20, 14))
for _ in range(rows):
    for _ in range(columns):
        ax = plt.subplot(rows, columns, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # min-max scale each channel for display (ix - 1 fixes the reversed 1 - ix indexing)
        scaler = MinMaxScaler()
        channel_image = scaler.fit_transform(feature_maps[0, :, :, ix - 1])
        channel_image = channel_image * 255
        plt.imshow(channel_image, cmap='viridis')
        ix += 1
# show the figure
plt.show()
# redefine the model to output the activations right after the second max-pooling layer (layers[7])
outputs = model_16.layers[7].output
model_f = tf.keras.Model(inputs=model_16.inputs, outputs=outputs)
model_f.summary()
Model: "model_11" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_11_input (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d_11 (Conv2D) (None, 32, 32, 128) 3584 _________________________________________________________________ max_pooling2d_11 (MaxPooling (None, 16, 16, 128) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 16, 16, 128) 0 _________________________________________________________________ batch_normalization_33 (Batc (None, 16, 16, 128) 512 _________________________________________________________________ conv2d_12 (Conv2D) (None, 16, 16, 256) 295168 _________________________________________________________________ conv2d_13 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ conv2d_14 (Conv2D) (None, 16, 16, 256) 590080 _________________________________________________________________ max_pooling2d_12 (MaxPooling (None, 8, 8, 256) 0 ================================================================= Total params: 1,479,424 Trainable params: 1,479,168 Non-trainable params: 256 _________________________________________________________________
feature_maps = model_f.predict(img_tensor)
# plot all 256 channels of the 8x8 pooled maps in a 16 x 16 grid
rows = 16
columns = 16
ix = 1
plt.figure(figsize=(20, 14))
for _ in range(rows):
    for _ in range(columns):
        ax = plt.subplot(rows, columns, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # min-max scale each channel for display (ix - 1 fixes the reversed 1 - ix indexing)
        scaler = MinMaxScaler()
        channel_image = scaler.fit_transform(feature_maps[0, :, :, ix - 1])
        channel_image = channel_image * 255
        plt.imshow(channel_image, cmap='viridis')
        ix += 1
# show the figure
plt.show()
# sanity check: MinMaxScaler maps each column of the feature map into [0, 1]
feature_maps = model_f.predict(img_tensor)
scaler = MinMaxScaler()
np.min(scaler.fit_transform(feature_maps[0, :, :, 0]))
0.0
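MinMaxScaler applies (x - min) / (max - min) column-wise, so the transformed map's minimum is 0 by construction. An equivalent NumPy check (the small epsilon guarding constant columns is our addition):
fm = feature_maps[0, :, :, 0]
manual = (fm - fm.min(axis=0)) / (fm.max(axis=0) - fm.min(axis=0) + 1e-12)
print(np.allclose(scaler.fit_transform(fm), manual, atol=1e-6))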
(feature_maps[0 ,:,: , 0])
array([[0.375, 0.019, 0.066, 0.086, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. ],
[0.28 , 0.039, 0.043, 0.103, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. , 0. ],
[0.373, 0. , 0.146, 0.075, 0.049, 0. , 0. , 0. , 0. ,
0. , 0.007, 0.33 , 0.123, 0. , 0. ],
[0.358, 0. , 0.349, 0.043, 0.065, 0. , 0. , 0.062, 0.084,
0.048, 0.223, 0.451, 0.053, 0. , 0. ],
[0.033, 0.017, 0.369, 0.033, 0.068, 0.036, 0. , 0.036, 0. ,
0.029, 0.11 , 0.083, 0.12 , 0.018, 0. ],
[0. , 0.176, 0.415, 0.041, 0.051, 0. , 0. , 0.027, 0.106,
0. , 0.087, 0.035, 0.225, 0.003, 0. ],
[0. , 0.351, 0.344, 0.032, 0.02 , 0. , 0. , 0.071, 0.092,
0.017, 0.041, 0.003, 0.261, 0.003, 0. ],
[0. , 0.111, 0.214, 0.218, 0.056, 0. , 0. , 0.06 , 0. ,
0.192, 0.012, 0. , 0.269, 0.07 , 0. ],
[0. , 0. , 0.133, 0.353, 0. , 0. , 0. , 0. , 0.007,
0.292, 0.249, 0. , 0.313, 0.239, 0. ],
[0. , 0. , 0.26 , 0.444, 0. , 0. , 0. , 0. , 0. ,
0.056, 0.294, 0.142, 0. , 0.256, 0.059],
[0.006, 0. , 0.292, 0.413, 0. , 0. , 0. , 0. , 0. ,
0. , 0.279, 0.315, 0. , 0.104, 0.098],
[0.112, 0. , 0.168, 0.284, 0. , 0.01 , 0. , 0. , 0.056,
0. , 0.131, 0.357, 0. , 0. , 0.021],
[0.247, 0.164, 0.177, 0.131, 0.009, 0.05 , 0.056, 0.065, 0.124,
0. , 0. , 0.352, 0.072, 0. , 0.083],
[0. , 0.022, 0.083, 0.361, 0. , 0. , 0. , 0.04 , 0.025,
0.131, 0.172, 0.142, 0.097, 0. , 0.023],
[0. , 0. , 0.01 , 0.231, 0.014, 0. , 0. , 0. , 0. ,
0. , 0.192, 0.186, 0.355, 0. , 0. ]], dtype=float32)
# repeat the visualization for the earlier baseline `model`: output its third conv layer (layers[6])
outputs = model.layers[6].output
model_f = tf.keras.Model(inputs=model.inputs, outputs=outputs)
model_f.summary()
Model: "model_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_input (InputLayer) [(None, 32, 32, 3)] 0 _________________________________________________________________ conv2d (Conv2D) (None, 30, 30, 128) 3584 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 15, 15, 128) 0 _________________________________________________________________ dropout (Dropout) (None, 15, 15, 128) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 13, 13, 256) 295168 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 6, 6, 256) 0 _________________________________________________________________ dropout_1 (Dropout) (None, 6, 6, 256) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 4, 4, 512) 1180160 ================================================================= Total params: 1,478,912 Trainable params: 1,478,912 Non-trainable params: 0 _________________________________________________________________
feature_maps = model_f.predict(img_tensor)
# plot all 128 of the 4x4 maps in an 8 x 16 grid
rows = 8
columns = 16
ix = 1
plt.figure(figsize=(14, 10))
for _ in range(rows):
    for _ in range(columns):
        ax = plt.subplot(rows, columns, ix)
        ax.set_xticks([])
        ax.set_yticks([])
        # min-max scale each channel for display (ix - 1 fixes the reversed 1 - ix indexing)
        scaler = MinMaxScaler()
        channel_image = scaler.fit_transform(feature_maps[0, :, :, ix - 1])
        channel_image = channel_image * 255
        plt.imshow(channel_image, cmap='viridis')
        ix += 1
# show the figure
plt.show()
tf.keras.models.save_model(model_21, filepath= '/content/gdrive/My Drive/Colab Notebooks/models' )
INFO:tensorflow:Assets written to: /content/gdrive/My Drive/Colab Notebooks/models/assets
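save_model wrote a TensorFlow SavedModel directory (saved_model.pb, variables/, assets/) under .../models. For a single-file artifact, the HDF5 format also works; a sketch (the filename is ours):
model_21.save('/content/gdrive/My Drive/Colab Notebooks/models/model_21.h5')  # single HDF5 file
reloaded = tf.keras.models.load_model('/content/gdrive/My Drive/Colab Notebooks/models/model_21.h5')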
!ls '/content/gdrive/My Drive/Colab Notebooks/models'  # the SavedModel was written here, not to ./saved_model
# Load the SavedModel from its root directory; pointing load_model at the assets/
# subfolder raises "OSError: SavedModel file does not exist" because saved_model.pb
# lives one level up.
model_21_loaded = tf.keras.models.load_model('/content/gdrive/My Drive/Colab Notebooks/models')
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
# Extract the outputs of all layers:
layer_outputs = [layer.output for layer in model_21.layers]
# Create a model that returns those outputs, given the model input:
activation_model = models.Model(inputs=model_21.input, outputs=layer_outputs)
# Get the activation values. Note: testdata is already a generator and flow() expects
# raw arrays, so build a fresh non-shuffled flow over the test images instead.
activations = activation_model.predict(trdata.flow(x_test_aug, shuffle=False))
dense_layer_activations = activations[-3]   # activations feeding the softmax head
output_layer_activations = activations[-1]  # softmax outputs
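These activations can be embedded in two dimensions to see how well the penultimate representation separates the classes; a sketch using the TSNE import from the top of the notebook (the perplexity and the subsampling to 2,000 points are our assumptions, purely to keep the runtime manageable):
idx = np.random.RandomState(42).choice(len(dense_layer_activations), 2000, replace=False)
emb = TSNE(n_components=2, perplexity=30, random_state=42).fit_transform(dense_layer_activations[idx])
plt.figure(figsize=(8, 8))
plt.scatter(emb[:, 0], emb[:, 1], c=y_test.ravel()[idx], cmap='tab10', s=5)
plt.colorbar(label='class label')
plt.show()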
t = ['28s', '33s', '59s', '34s', '21s', '76s', '241s', '290s', '233s', '1332s', '3686s']
vl = [1.6, 1.81, 1.87, 1.74, 1.53, 1.70, 0.74, 0.74, 0.73, 0.69, 0.35]
tl = [0.85, 0.55, 0.000, 0.004, 0.006, 0.072, 0.10, 0.106, 0.006, 0.02, 0.22]
va = [48, 49.7, 72.9, 73.12, 45.74, 43.37, 81.82, 82, 84.18, 85.88, 89.16]
ta = [69.8, 80.6, 100, 99.94, 63, 46, 99.8, 99.7, 99.82, 96.5, 93]
tea = [49, 51, 73, 73, 47, 44, 81, 79, 82, 86, 88]
m = ['Exp 1', 'Exp 2', 'Exp 3', 'Exp 4', 'Exp 5', 'Exp 6', 'Exp 7', 'Exp 8', 'Exp 9', 'Exp 10', 'Exp 11']
d = ['MLP 2 layer (512, 512)',
     'MLP 3 layer (1024, 516, 256)',
     'CNN 2 layer (64, 128) + 1 Dense layer 384',
     'CNN 3 layer (64, 128, 256) + 1 Dense layer 384',
     'MLP 5 layer (512 x 5)',
     'MLP 5 layer (512 x 5)',
     'CNN 3 layer (128, 256, 512) + 1 Dense layer 384',
     'CNN 4 layer (128, 256, 512, 1024) + 2 Dense layer (256, 512)',
     'CNN 4 layer (128, 256, 512, 1024) + \n 2 Dense layer (256, 512)',
     'CNN 10 layer (128, 256 x 3 stacked, 512 x 3 stacked, 1024 x 3 stacked) +\n 3 Dense layer (1024, 1024, 1024)',
     'CNN 10 layer (128, 256 x 3 stacked, 512 x 3 stacked, 1024 x 3 stacked) + \n 3 Dense layer (1024, 1024, 1024)']
op = ['RMSprop', 'RMSprop', 'RMSprop', 'RMSprop', 'Adam', 'Adam', 'RMSprop', 'RMSprop', 'Adam', 'Adam', 'Adam']
bs = [512, 512, 512, 512, 256, 256, 512, 512, 512, 512, 64]
re = ['-', '-', '-', '-',
      'L2, Batch Normalization',
      'L2, Batch Normalization, Dropout',
      'L2, Batch Normalization, Dropout',
      'L2, Batch Normalization, Dropout, Padding',
      'L2, Batch Normalization, Dropout, Padding',
      'L2, Batch Normalization, Dropout, Padding',
      'L2, Batch Normalization, Dropout, Padding,\n Image Augmentation']
df = pd.DataFrame([m, d, re, op, bs, tea, va, ta, vl, tl, t]).T
df.columns = ['Experiments', 'Model Type', 'Regularization Hyperparameters', 'Optimizer', 'Batch Size',
              'Test Accuracy %', 'Validation Accuracy%', 'Train Accuracy%',
              'Validation Loss', 'Training Loss', 'Training Time']
df = df.sort_values('Test Accuracy %', ascending=False)
df = df.set_index('Experiments')
print(tabulate(df, headers='keys', tablefmt='fancy_grid'))
╒═══════════════╤═══════════════════════════════════════════════════════════════════════════╤════════════════════════════════════════════╤══════════════╤══════════════╤═══════════════════╤════════════════════════╤═══════════════════╤═══════════════════╤═════════════════╤═════════════════╕ │ Experiments │ Model Type │ Regularization Hyperparameters │ Optimizer │ Batch Size │ Test Accuracy % │ Validation Accuracy% │ Train Accuracy% │ Validation Loss │ Training Loss │ Training Time │ ╞═══════════════╪═══════════════════════════════════════════════════════════════════════════╪════════════════════════════════════════════╪══════════════╪══════════════╪═══════════════════╪════════════════════════╪═══════════════════╪═══════════════════╪═════════════════╪═════════════════╡ │ Exp 11 │ CNN 10 layer (128, 256 x 3 stacked, 512 x 3 stacked, 1024 x 3 stacked) + │ L2, Batch Normalization, Dropout, Padding, │ Adam │ 64 │ 88 │ 89.16 │ 93 │ 0.35 │ 0.22 │ 3686s │ │ │ 3 Dense layer ( 1024, 1024,1024) │ Image Augmentation │ │ │ │ │ │ │ │ │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 10 │ CNN 10 layer (128, 256 x 3 stacked, 512 x 3 stacked, 1024 x 3 stacked) + │ L2, Batch Normalization, Dropout, Padding │ Adam │ 512 │ 86 │ 85.88 │ 96.5 │ 0.69 │ 0.02 │ 1332s │ │ │ 3 Dense layer ( 1024, 1024,1024) │ │ │ │ │ │ │ │ │ │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 9 │ CNN 4 layer (128, 256, 512, 1024) + │ L2, Batch Normalization, Dropout, Padding │ Adam │ 512 │ 82 │ 84.18 │ 99.82 │ 0.73 │ 0.006 │ 233s │ │ │ 2 Dense layer ( 256, 512) │ │ │ │ │ │ │ │ │ │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 7 │ CNN 3 layer (128, 256, 512)+ 1 Dense layer 384 │ L2, Batch Normalization, Dropout │ RMSprop │ 512 │ 81 │ 81.82 │ 99.8 │ 0.74 │ 0.1 │ 241s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 8 │ CNN 4 layer (128, 256, 512, 1024)+ 2 Dense layer ( 256, 512) │ L2, Batch Normalization, Dropout, Padding │ RMSprop │ 512 │ 79 │ 82 │ 99.7 │ 0.74 │ 0.106 │ 290s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 3 │ CNN 2 layer (64, 128) + 1 Dense layer 384 │ - │ RMSprop │ 512 │ 73 │ 72.9 │ 100 │ 1.87 │ 0 │ 59s │ 
├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 4 │ CNN 3 Layer (64,128,256)+ 1 Dense layer 384 │ - │ RMSprop │ 512 │ 73 │ 73.12 │ 99.94 │ 1.74 │ 0.004 │ 34s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 2 │ MLP 3 layer (1024 ,516, 256) │ - │ RMSprop │ 512 │ 51 │ 49.7 │ 80.6 │ 1.81 │ 0.55 │ 33s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 1 │ MLP 2 layer (512,512) │ - │ RMSprop │ 512 │ 49 │ 48 │ 69.8 │ 1.6 │ 0.85 │ 28s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 5 │ MLP 5 layer (512 x 5) │ L2, Batch Normalization │ Adam │ 256 │ 47 │ 45.74 │ 63 │ 1.53 │ 0.006 │ 21s │ ├───────────────┼───────────────────────────────────────────────────────────────────────────┼────────────────────────────────────────────┼──────────────┼──────────────┼───────────────────┼────────────────────────┼───────────────────┼───────────────────┼─────────────────┼─────────────────┤ │ Exp 6 │ MLP 5 Layer (512 x 5 │ L2, Batch Normalization, Dropout │ Adam │ 256 │ 44 │ 43.37 │ 46 │ 1.7 │ 0.072 │ 76s │ ╘═══════════════╧═══════════════════════════════════════════════════════════════════════════╧════════════════════════════════════════════╧══════════════╧══════════════╧═══════════════════╧════════════════════════╧═══════════════════╧═══════════════════╧═════════════════╧═════════════════╛
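One caveat with building the frame by transposing a list of rows: every column ends up with dtype object. The sort above works because the test-accuracy entries all happen to be ints, but converting the numeric columns first is safer; a sketch:
num_cols = ['Test Accuracy %', 'Validation Accuracy%', 'Train Accuracy%',
            'Validation Loss', 'Training Loss']
df[num_cols] = df[num_cols].apply(pd.to_numeric)  # object -> numeric dtypes
df = df.sort_values('Test Accuracy %', ascending=False)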